Commit c15a2434 authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (24 commits)
  [XFS] Fix build failure after enabling CONFIG_XFS_DEBUG
  [XFS] remove dmapi cruft in xfs_file.c
  [XFS] remove sendfile leftovers
  [XFS] allow enabling CONFIG_XFS_DEBUG
  [XFS] Don't initialise new inode generation numbers to zero
  [XFS] Fix check for block zero access in xfs_write_iomap_allocate()
  [XFS] Don't double count reserved block changes on UP.
  [XFS] remove xfs_log_ticket_zone on rmmod
  [XFS] fix non-smp xfs build
  [XFS] Fix broken HAVE_SPLICE removal commit.
  [XFS] kill XFS_ICSB_SB_LOCKED
  [XFS] split xfs_icsb_balance_counter
  [XFS] Add xfs_icsb_sync_counters_locked for when m_sb_lock already held
  [XFS] Cleanup xfs_attr a bit with xfs_name and remove cred
  [XFS] kill usesless IHOLD calls in xfs_remove and xfs_rmdir
  [XFS] kill parent == child checks in xfs_remove and xfs_rmdir
  [XFS] kill usesless IHOLD calls in xfs_rename
  [XFS] remove manual lookup from xfs_rename and simplify locking
  [XFS] shrink mrlock_t
  [XFS] simplify xfs_lookup
  ...
parents a94a630a adaa693b
@@ -64,3 +64,16 @@ config XFS_RT
 See the xfs man page in section 5 for additional information.
 If unsure, say N.
+config XFS_DEBUG
+bool "XFS Debugging support (EXPERIMENTAL)"
+depends on XFS_FS && EXPERIMENTAL
+help
+Say Y here to get an XFS build with many debugging features,
+including ASSERT checks, function wrappers around macros,
+and extra sanity-checking functions in various code paths.
+Note that the resulting code will be HUGE and SLOW, and probably
+not useful unless you are debugging a particular problem.
+Say N unless you are an XFS developer, or you play one on TV.
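Editor's note: the help text above refers to ASSERT checks that only exist in a debug build. As a rough illustration of the pattern such a config option gates (not the exact XFS macros; `assert_failed` is a hypothetical name), it looks like this:

/* Illustrative sketch only -- names are hypothetical, not the XFS originals. */
#ifdef DEBUG	/* defined when CONFIG_XFS_DEBUG=y selects a debug build */
extern void assert_failed(const char *expr, const char *file, int line);
#define ASSERT(expr) \
	((expr) ? (void)0 : assert_failed(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)	/* compiles away in production builds */
#endif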
@@ -20,29 +20,24 @@
 #include <linux/rwsem.h>
-enum { MR_NONE, MR_ACCESS, MR_UPDATE };
 typedef struct {
 struct rw_semaphore mr_lock;
+#ifdef DEBUG
 int mr_writer;
+#endif
 } mrlock_t;
+#ifdef DEBUG
 #define mrinit(mrp, name) \
 do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
+#else
+#define mrinit(mrp, name) \
+do { init_rwsem(&(mrp)->mr_lock); } while (0)
+#endif
 #define mrlock_init(mrp, t,n,s) mrinit(mrp, n)
 #define mrfree(mrp) do { } while (0)
-static inline void mraccess(mrlock_t *mrp)
-{
-down_read(&mrp->mr_lock);
-}
-static inline void mrupdate(mrlock_t *mrp)
-{
-down_write(&mrp->mr_lock);
-mrp->mr_writer = 1;
-}
 static inline void mraccess_nested(mrlock_t *mrp, int subclass)
 {
 down_read_nested(&mrp->mr_lock, subclass);
@@ -51,10 +46,11 @@ static inline void mraccess_nested(mrlock_t *mrp, int subclass)
 static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
 {
 down_write_nested(&mrp->mr_lock, subclass);
+#ifdef DEBUG
 mrp->mr_writer = 1;
+#endif
 }
 static inline int mrtryaccess(mrlock_t *mrp)
 {
 return down_read_trylock(&mrp->mr_lock);
@@ -64,39 +60,31 @@ static inline int mrtryupdate(mrlock_t *mrp)
 {
 if (!down_write_trylock(&mrp->mr_lock))
 return 0;
+#ifdef DEBUG
 mrp->mr_writer = 1;
+#endif
 return 1;
 }
-static inline void mrunlock(mrlock_t *mrp)
+static inline void mrunlock_excl(mrlock_t *mrp)
 {
-if (mrp->mr_writer) {
+#ifdef DEBUG
 mrp->mr_writer = 0;
+#endif
 up_write(&mrp->mr_lock);
-} else {
+}
+static inline void mrunlock_shared(mrlock_t *mrp)
+{
 up_read(&mrp->mr_lock);
-}
 }
 static inline void mrdemote(mrlock_t *mrp)
 {
+#ifdef DEBUG
 mrp->mr_writer = 0;
+#endif
 downgrade_write(&mrp->mr_lock);
 }
-#ifdef DEBUG
-/*
-* Debug-only routine, without some platform-specific asm code, we can
-* now only answer requests regarding whether we hold the lock for write
-* (reader state is outside our visibility, we only track writer state).
-* Note: means !ismrlocked would give false positives, so don't do that.
-*/
-static inline int ismrlocked(mrlock_t *mrp, int type)
-{
-if (mrp && type == MR_UPDATE)
-return mrp->mr_writer;
-return 1;
-}
-#endif
 #endif /* __XFS_SUPPORT_MRLOCK_H__ */
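Editor's note: a minimal usage sketch of the slimmed-down mrlock API above (the `struct my_object` wrapper is hypothetical; `mrupdate_nested`, `mrunlock_excl`, `mraccess_nested` and `mrunlock_shared` are the functions from this header). In DEBUG builds the exclusive path also maintains `mr_writer`, which is what the new xfs_isilocked() later inspects:

struct my_object {
	mrlock_t	lock;		/* hypothetical embedding of an mrlock */
};

static void my_object_modify(struct my_object *obj)
{
	mrupdate_nested(&obj->lock, 0);		/* take exclusively (writer) */
	/* ... modify obj; under DEBUG, obj->lock.mr_writer is now 1 ... */
	mrunlock_excl(&obj->lock);
}

static void my_object_inspect(struct my_object *obj)
{
	mraccess_nested(&obj->lock, 0);		/* take shared (reader) */
	/* ... read obj ... */
	mrunlock_shared(&obj->lock);
}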
@@ -886,7 +886,7 @@ int
 xfs_buf_lock_value(
 xfs_buf_t *bp)
 {
-return atomic_read(&bp->b_sema.count);
+return bp->b_sema.count;
 }
 #endif
......
@@ -133,7 +133,7 @@ xfs_nfs_get_inode(
 if (!ip)
 return ERR_PTR(-EIO);
-if (!ip->i_d.di_mode || ip->i_d.di_gen != generation) {
+if (ip->i_d.di_gen != generation) {
 xfs_iput_new(ip, XFS_ILOCK_SHARED);
 return ERR_PTR(-ENOENT);
 }
......
@@ -43,9 +43,6 @@
 #include <linux/smp_lock.h>
 static struct vm_operations_struct xfs_file_vm_ops;
-#ifdef CONFIG_XFS_DMAPI
-static struct vm_operations_struct xfs_dmapi_file_vm_ops;
-#endif
 STATIC_INLINE ssize_t
 __xfs_file_read(
@@ -202,22 +199,6 @@ xfs_file_fsync(
 (xfs_off_t)0, (xfs_off_t)-1);
 }
-#ifdef CONFIG_XFS_DMAPI
-STATIC int
-xfs_vm_fault(
-struct vm_area_struct *vma,
-struct vm_fault *vmf)
-{
-struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
-bhv_vnode_t *vp = vn_from_inode(inode);
-ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
-if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
-return VM_FAULT_SIGBUS;
-return filemap_fault(vma, vmf);
-}
-#endif /* CONFIG_XFS_DMAPI */
 /*
 * Unfortunately we can't just use the clean and simple readdir implementation
 * below, because nfs might call back into ->lookup from the filldir callback
@@ -386,11 +367,6 @@ xfs_file_mmap(
 vma->vm_ops = &xfs_file_vm_ops;
 vma->vm_flags |= VM_CAN_NONLINEAR;
-#ifdef CONFIG_XFS_DMAPI
-if (XFS_M(filp->f_path.dentry->d_inode->i_sb)->m_flags & XFS_MOUNT_DMAPI)
-vma->vm_ops = &xfs_dmapi_file_vm_ops;
-#endif /* CONFIG_XFS_DMAPI */
 file_accessed(filp);
 return 0;
 }
@@ -437,47 +413,6 @@ xfs_file_ioctl_invis(
 return error;
 }
-#ifdef CONFIG_XFS_DMAPI
-#ifdef HAVE_VMOP_MPROTECT
-STATIC int
-xfs_vm_mprotect(
-struct vm_area_struct *vma,
-unsigned int newflags)
-{
-struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
-struct xfs_mount *mp = XFS_M(inode->i_sb);
-int error = 0;
-if (mp->m_flags & XFS_MOUNT_DMAPI) {
-if ((vma->vm_flags & VM_MAYSHARE) &&
-(newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE))
-error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
-}
-return error;
-}
-#endif /* HAVE_VMOP_MPROTECT */
-#endif /* CONFIG_XFS_DMAPI */
-#ifdef HAVE_FOP_OPEN_EXEC
-/* If the user is attempting to execute a file that is offline then
-* we have to trigger a DMAPI READ event before the file is marked as busy
-* otherwise the invisible I/O will not be able to write to the file to bring
-* it back online.
-*/
-STATIC int
-xfs_file_open_exec(
-struct inode *inode)
-{
-struct xfs_mount *mp = XFS_M(inode->i_sb);
-struct xfs_inode *ip = XFS_I(inode);
-if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) &&
-DM_EVENT_ENABLED(ip, DM_EVENT_READ))
-return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
-return 0;
-}
-#endif /* HAVE_FOP_OPEN_EXEC */
 /*
 * mmap()d file has taken write protection fault and is being made
 * writable. We can set the page state up correctly for a writable
@@ -546,13 +481,3 @@ static struct vm_operations_struct xfs_file_vm_ops = {
 .fault = filemap_fault,
 .page_mkwrite = xfs_vm_page_mkwrite,
 };
-#ifdef CONFIG_XFS_DMAPI
-static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
-.fault = xfs_vm_fault,
-.page_mkwrite = xfs_vm_page_mkwrite,
-#ifdef HAVE_VMOP_MPROTECT
-.mprotect = xfs_vm_mprotect,
-#endif
-};
-#endif /* CONFIG_XFS_DMAPI */
@@ -238,7 +238,7 @@ xfs_vget_fsop_handlereq(
 return error;
 if (ip == NULL)
 return XFS_ERROR(EIO);
-if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
+if (ip->i_d.di_gen != igen) {
 xfs_iput_new(ip, XFS_ILOCK_SHARED);
 return XFS_ERROR(ENOENT);
 }
@@ -512,7 +512,7 @@ xfs_attrmulti_attr_get(
 if (!kbuf)
 return ENOMEM;
-error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL);
+error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
 if (error)
 goto out_kfree;
......
@@ -511,7 +511,8 @@ xfs_vn_rename(
 xfs_dentry_to_name(&nname, ndentry);
 error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
-XFS_I(ndir), &nname);
+XFS_I(ndir), &nname, new_inode ?
+XFS_I(new_inode) : NULL);
 if (likely(!error)) {
 if (new_inode)
 xfs_validate_fields(new_inode);
......
@@ -99,7 +99,6 @@
 /*
 * Feature macros (disable/enable)
 */
-#define HAVE_SPLICE /* a splice(2) exists in 2.6, but not in 2.4 */
 #ifdef CONFIG_SMP
 #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
 #else
......
@@ -394,7 +394,7 @@ xfs_zero_last_block(
 int error = 0;
 xfs_bmbt_irec_t imap;
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 zero_offset = XFS_B_FSB_OFFSET(mp, isize);
 if (zero_offset == 0) {
@@ -425,14 +425,14 @@ xfs_zero_last_block(
 * out sync. We need to drop the ilock while we do this so we
 * don't deadlock when the buffer cache calls back to us.
 */
-xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
+xfs_iunlock(ip, XFS_ILOCK_EXCL);
 zero_len = mp->m_sb.sb_blocksize - zero_offset;
 if (isize + zero_len > offset)
 zero_len = offset - isize;
 error = xfs_iozero(ip, isize, zero_len);
-xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+xfs_ilock(ip, XFS_ILOCK_EXCL);
 ASSERT(error >= 0);
 return error;
 }
@@ -465,8 +465,7 @@ xfs_zero_eof(
 int error = 0;
 xfs_bmbt_irec_t imap;
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
 ASSERT(offset > isize);
 /*
@@ -475,8 +474,7 @@ xfs_zero_eof(
 */
 error = xfs_zero_last_block(ip, offset, isize);
 if (error) {
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
 return error;
 }
@@ -507,8 +505,7 @@ xfs_zero_eof(
 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
 0, NULL, 0, &imap, &nimaps, NULL, NULL);
 if (error) {
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
 return error;
 }
 ASSERT(nimaps > 0);
@@ -532,7 +529,7 @@ xfs_zero_eof(
 * Drop the inode lock while we're doing the I/O.
 * We'll still have the iolock to protect us.
 */
-xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+xfs_iunlock(ip, XFS_ILOCK_EXCL);
 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
@@ -548,13 +545,13 @@ xfs_zero_eof(
 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
-xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+xfs_ilock(ip, XFS_ILOCK_EXCL);
 }
 return 0;
 out_lock:
-xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+xfs_ilock(ip, XFS_ILOCK_EXCL);
 ASSERT(error >= 0);
 return error;
 }
......
@@ -50,7 +50,6 @@ struct xfs_iomap;
 #define XFS_INVAL_CACHED 18
 #define XFS_DIORD_ENTER 19
 #define XFS_DIOWR_ENTER 20
-#define XFS_SENDFILE_ENTER 21
 #define XFS_WRITEPAGE_ENTER 22
 #define XFS_RELEASEPAGE_ENTER 23
 #define XFS_INVALIDPAGE_ENTER 24
......
@@ -1181,7 +1181,7 @@ xfs_fs_statfs(
 statp->f_fsid.val[0] = (u32)id;
 statp->f_fsid.val[1] = (u32)(id >> 32);
-xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
+xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
 spin_lock(&mp->m_sb_lock);
 statp->f_bsize = sbp->sb_blocksize;
......
@@ -25,12 +25,6 @@ struct attrlist_cursor_kern;
 typedef struct inode bhv_vnode_t;
-#define VN_ISLNK(vp) S_ISLNK((vp)->i_mode)
-#define VN_ISREG(vp) S_ISREG((vp)->i_mode)
-#define VN_ISDIR(vp) S_ISDIR((vp)->i_mode)
-#define VN_ISCHR(vp) S_ISCHR((vp)->i_mode)
-#define VN_ISBLK(vp) S_ISBLK((vp)->i_mode)
 /*
 * Vnode to Linux inode mapping.
 */
@@ -151,24 +145,6 @@ typedef struct bhv_vattr {
 XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\
 XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT)
-/*
-* Modes.
-*/
-#define VSUID S_ISUID /* set user id on execution */
-#define VSGID S_ISGID /* set group id on execution */
-#define VSVTX S_ISVTX /* save swapped text even after use */
-#define VREAD S_IRUSR /* read, write, execute permissions */
-#define VWRITE S_IWUSR
-#define VEXEC S_IXUSR
-#define MODEMASK S_IALLUGO /* mode bits plus permission bits */
-/*
-* Check whether mandatory file locking is enabled.
-*/
-#define MANDLOCK(vp, mode) \
-(VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
 extern void vn_init(void);
 extern int vn_revalidate(bhv_vnode_t *);
......
@@ -933,7 +933,7 @@ xfs_qm_dqget(
 type == XFS_DQ_PROJ ||
 type == XFS_DQ_GROUP);
 if (ip) {
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 if (type == XFS_DQ_USER)
 ASSERT(ip->i_udquot == NULL);
 else
@@ -1088,7 +1088,7 @@ xfs_qm_dqget(
 xfs_qm_mplist_unlock(mp);
 XFS_DQ_HASH_UNLOCK(h);
 dqret:
-ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 xfs_dqtrace_entry(dqp, "DQGET DONE");
 *O_dqpp = dqp;
 return (0);
......
@@ -670,7 +670,7 @@ xfs_qm_dqattach_one(
 xfs_dquot_t *dqp;
 int error;
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 error = 0;
 /*
 * See if we already have it in the inode itself. IO_idqpp is
@@ -874,7 +874,7 @@ xfs_qm_dqattach(
 return 0;
 ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
-XFS_ISLOCKED_INODE_EXCL(ip));
+xfs_isilocked(ip, XFS_ILOCK_EXCL));
 if (! (flags & XFS_QMOPT_ILOCKED))
 xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -888,7 +888,8 @@ xfs_qm_dqattach(
 goto done;
 nquotas++;
 }
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 if (XFS_IS_OQUOTA_ON(mp)) {
 error = XFS_IS_GQUOTA_ON(mp) ?
 xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
@@ -913,7 +914,7 @@ xfs_qm_dqattach(
 * This WON'T, in general, result in a thrash.
 */
 if (nquotas == 2) {
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 ASSERT(ip->i_udquot);
 ASSERT(ip->i_gdquot);
@@ -956,7 +957,7 @@ xfs_qm_dqattach(
 #ifdef QUOTADEBUG
 else
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 #endif
 return error;
 }
@@ -1291,7 +1292,7 @@ xfs_qm_dqget_noattach(
 xfs_mount_t *mp;
 xfs_dquot_t *udqp, *gdqp;
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 mp = ip->i_mount;
 udqp = NULL;
 gdqp = NULL;
@@ -1392,7 +1393,7 @@ xfs_qm_qino_alloc(
 * Keep an extra reference to this quota inode. This inode is
 * locked exclusively and joined to the transaction already.
 */
-ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
+ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
 VN_HOLD(XFS_ITOV((*ip)));
 /*
@@ -1737,12 +1738,6 @@ xfs_qm_dqusage_adjust(
 return error;
 }
-if (ip->i_d.di_mode == 0) {
-xfs_iput_new(ip, XFS_ILOCK_EXCL);
-*res = BULKSTAT_RV_NOTHING;
-return XFS_ERROR(ENOENT);
-}
 /*
 * Obtain the locked dquots. In case of an error (eg. allocation
 * fails for ENOSPC), we return the negative of the error number
@@ -2563,7 +2558,7 @@ xfs_qm_vop_chown(
 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
 /* old dquot */
@@ -2607,7 +2602,7 @@ xfs_qm_vop_chown_reserve(
 uint delblks, blkflags, prjflags = 0;
 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
-ASSERT(XFS_ISLOCKED_INODE(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 mp = ip->i_mount;
 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -2717,7 +2712,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
 if (!XFS_IS_QUOTA_ON(tp->t_mountp))
 return;
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
 if (udqp) {
......
@@ -1366,12 +1366,6 @@ xfs_qm_internalqcheck_adjust(
 return (error);
 }
-if (ip->i_d.di_mode == 0) {
-xfs_iput_new(ip, lock_flags);
-*res = BULKSTAT_RV_NOTHING;
-return XFS_ERROR(ENOENT);
-}
 /*
 * This inode can have blocks after eof which can get released
 * when we send it to inactive. Since we don't check the dquot
......
@@ -27,11 +27,6 @@
 /* Number of dquots that fit in to a dquot block */
 #define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk)
-#define XFS_ISLOCKED_INODE(ip) (ismrlocked(&(ip)->i_lock, \
-MR_UPDATE | MR_ACCESS) != 0)
-#define XFS_ISLOCKED_INODE_EXCL(ip) (ismrlocked(&(ip)->i_lock, \
-MR_UPDATE) != 0)
 #define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t))
 #define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims)
......
@@ -834,7 +834,7 @@ xfs_trans_reserve_quota_nblks(
 ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
 ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
-ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
 ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
 XFS_TRANS_DQ_RES_RTBLKS ||
......
@@ -22,7 +22,7 @@
 #define STATIC
 #define DEBUG 1
 #define XFS_BUF_LOCK_TRACKING 1
-#define QUOTADEBUG 1
+/* #define QUOTADEBUG 1 */
 #endif
 #ifdef CONFIG_XFS_TRACE
......
@@ -72,7 +72,7 @@ xfs_acl_vhasacl_default(
 {
 int error;
-if (!VN_ISDIR(vp))
+if (!S_ISDIR(vp->i_mode))
 return 0;
 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
 return (error == 0);
@@ -238,15 +238,8 @@ xfs_acl_vget(
 error = EINVAL;
 goto out;
 }
-if (kind == _ACL_TYPE_ACCESS) {
+if (kind == _ACL_TYPE_ACCESS)
-bhv_vattr_t va;
+xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl);
-va.va_mask = XFS_AT_MODE;
-error = xfs_getattr(xfs_vtoi(vp), &va, 0);
-if (error)
-goto out;
-xfs_acl_sync_mode(va.va_mode, xfs_acl);
-}
 error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
 }
 out:
@@ -341,14 +334,15 @@ xfs_acl_iaccess(
 {
 xfs_acl_t *acl;
 int rval;
+struct xfs_name acl_name = {SGI_ACL_FILE, SGI_ACL_FILE_SIZE};
 if (!(_ACL_ALLOC(acl)))
 return -1;
 /* If the file has no ACL return -1. */
 rval = sizeof(xfs_acl_t);
-if (xfs_attr_fetch(ip, SGI_ACL_FILE, SGI_ACL_FILE_SIZE,
+if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval,
-(char *)acl, &rval, ATTR_ROOT | ATTR_KERNACCESS, cr)) {
+ATTR_ROOT | ATTR_KERNACCESS)) {
 _ACL_FREE(acl);
 return -1;
 }
@@ -373,23 +367,15 @@ xfs_acl_allow_set(
 bhv_vnode_t *vp,
 int kind)
 {
-xfs_inode_t *ip = xfs_vtoi(vp);
-bhv_vattr_t va;
-int error;
 if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
 return EPERM;
-if (kind == _ACL_TYPE_DEFAULT && !VN_ISDIR(vp))
+if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode))
 return ENOTDIR;
 if (vp->i_sb->s_flags & MS_RDONLY)
 return EROFS;
-va.va_mask = XFS_AT_UID;
+if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
-error = xfs_getattr(ip, &va, 0);
-if (error)
-return error;
-if (va.va_uid != current->fsuid && !capable(CAP_FOWNER))
 return EPERM;
-return error;
+return 0;
 }
 /*
@@ -594,7 +580,7 @@ xfs_acl_get_attr(
 *error = xfs_attr_get(xfs_vtoi(vp),
 kind == _ACL_TYPE_ACCESS ?
 SGI_ACL_FILE : SGI_ACL_DEFAULT,
-(char *)aclp, &len, flags, sys_cred);
+(char *)aclp, &len, flags);
 if (*error || (flags & ATTR_KERNOVAL))
 return;
 xfs_acl_get_endian(aclp);
@@ -643,7 +629,6 @@ xfs_acl_vtoacl(
 xfs_acl_t *access_acl,
 xfs_acl_t *default_acl)
 {
-bhv_vattr_t va;
 int error = 0;
 if (access_acl) {
@@ -652,16 +637,10 @@ xfs_acl_vtoacl(
 * be obtained for some reason, invalidate the access ACL.
 */
 xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
-if (!error) {
-/* Got the ACL, need the mode... */
-va.va_mask = XFS_AT_MODE;
-error = xfs_getattr(xfs_vtoi(vp), &va, 0);
-}
 if (error)
 access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
 else /* We have a good ACL and the file mode, synchronize. */
-xfs_acl_sync_mode(va.va_mode, access_acl);
+xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl);
 }
 if (default_acl) {
@@ -719,7 +698,7 @@ xfs_acl_inherit(
 * If the new file is a directory, its default ACL is a copy of
 * the containing directory's default ACL.
 */
-if (VN_ISDIR(vp))
+if (S_ISDIR(vp->i_mode))
 xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
 if (!error && !basicperms)
 xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
@@ -744,7 +723,7 @@ xfs_acl_setmode(
 bhv_vattr_t va;
 xfs_acl_entry_t *ap;
 xfs_acl_entry_t *gap = NULL;
-int i, error, nomask = 1;
+int i, nomask = 1;
 *basicperms = 1;
@@ -756,11 +735,7 @@ xfs_acl_setmode(
 * mode. The m:: bits take precedence over the g:: bits.
 */
 va.va_mask = XFS_AT_MODE;
-error = xfs_getattr(xfs_vtoi(vp), &va, 0);
+va.va_mode = xfs_vtoi(vp)->i_d.di_mode;
-if (error)
-return error;
-va.va_mask = XFS_AT_MODE;
 va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
 ap = acl->acl_entry;
 for (i = 0; i < acl->acl_cnt; ++i) {
......
@@ -101,14 +101,28 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
 ktrace_t *xfs_attr_trace_buf;
 #endif
+STATIC int
+xfs_attr_name_to_xname(
+struct xfs_name *xname,
+const char *aname)
+{
+if (!aname)
+return EINVAL;
+xname->name = aname;
+xname->len = strlen(aname);
+if (xname->len >= MAXNAMELEN)
+return EFAULT; /* match IRIX behaviour */
+return 0;
+}
 /*========================================================================
 * Overall external interface routines.
 *========================================================================*/
 int
-xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen,
+xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name,
-char *value, int *valuelenp, int flags, struct cred *cred)
+char *value, int *valuelenp, int flags)
 {
 xfs_da_args_t args;
 int error;
@@ -122,8 +136,8 @@ xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen,
 * Fill in the arg structure for this request.
 */
 memset((char *)&args, 0, sizeof(args));
-args.name = name;
+args.name = name->name;
-args.namelen = namelen;
+args.namelen = name->len;
 args.value = value;
 args.valuelen = *valuelenp;
 args.flags = flags;
@@ -162,30 +176,28 @@ xfs_attr_get(
 const char *name,
 char *value,
 int *valuelenp,
-int flags,
+int flags)
-cred_t *cred)
 {
-int error, namelen;
+int error;
+struct xfs_name xname;
 XFS_STATS_INC(xs_attr_get);
-if (!name)
-return(EINVAL);
-namelen = strlen(name);
-if (namelen >= MAXNAMELEN)
-return(EFAULT); /* match IRIX behaviour */
 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 return(EIO);
+error = xfs_attr_name_to_xname(&xname, name);
+if (error)
+return error;
 xfs_ilock(ip, XFS_ILOCK_SHARED);
-error = xfs_attr_fetch(ip, name, namelen, value, valuelenp, flags, cred);
+error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags);
 xfs_iunlock(ip, XFS_ILOCK_SHARED);
 return(error);
 }
-int
+STATIC int
-xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
+xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
 char *value, int valuelen, int flags)
 {
 xfs_da_args_t args;
@@ -209,7 +221,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
 */
 if (XFS_IFORK_Q(dp) == 0) {
 int sf_size = sizeof(xfs_attr_sf_hdr_t) +
-XFS_ATTR_SF_ENTSIZE_BYNAME(namelen, valuelen);
+XFS_ATTR_SF_ENTSIZE_BYNAME(name->len, valuelen);
 if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd)))
 return(error);
@@ -219,8 +231,8 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
 * Fill in the arg structure for this request.
 */
 memset((char *)&args, 0, sizeof(args));
-args.name = name;
+args.name = name->name;
-args.namelen = namelen;
+args.namelen = name->len;
 args.value = value;
 args.valuelen = valuelen;
 args.flags = flags;
@@ -236,7 +248,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
 * Determine space new attribute will use, and if it would be
 * "local" or "remote" (note: local != inline).
 */
-size = xfs_attr_leaf_newentsize(namelen, valuelen,
+size = xfs_attr_leaf_newentsize(name->len, valuelen,
 mp->m_sb.sb_blocksize, &local);
 nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
@@ -429,26 +441,27 @@ xfs_attr_set(
 int valuelen,
 int flags)
 {
-int namelen;
+int error;
+struct xfs_name xname;
-namelen = strlen(name);
-if (namelen >= MAXNAMELEN)
-return EFAULT; /* match IRIX behaviour */
 XFS_STATS_INC(xs_attr_set);
 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 return (EIO);
-return xfs_attr_set_int(dp, name, namelen, value, valuelen, flags);
+error = xfs_attr_name_to_xname(&xname, name);
+if (error)
+return error;
+return xfs_attr_set_int(dp, &xname, value, valuelen, flags);
 }
 /*
 * Generic handler routine to remove a name from an attribute list.
 * Transitions attribute list from Btree to shortform as necessary.
 */
-int
+STATIC int
-xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags)
+xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
 {
 xfs_da_args_t args;
 xfs_fsblock_t firstblock;
@@ -460,8 +473,8 @@ xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags)
 * Fill in the arg structure for this request.
 */
 memset((char *)&args, 0, sizeof(args));
-args.name = name;
+args.name = name->name;
-args.namelen = namelen;
+args.namelen = name->len;
 args.flags = flags;
 args.hashval = xfs_da_hashname(args.name, args.namelen);
 args.dp = dp;
@@ -575,17 +588,18 @@ xfs_attr_remove(
 const char *name,
 int flags)
 {
-int namelen;
+int error;
+struct xfs_name xname;
-namelen = strlen(name);
-if (namelen >= MAXNAMELEN)
-return EFAULT; /* match IRIX behaviour */
 XFS_STATS_INC(xs_attr_remove);
 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 return (EIO);
+error = xfs_attr_name_to_xname(&xname, name);
+if (error)
+return error;
 xfs_ilock(dp, XFS_ILOCK_SHARED);
 if (XFS_IFORK_Q(dp) == 0 ||
 (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
@@ -595,10 +609,10 @@ xfs_attr_remove(
 }
 xfs_iunlock(dp, XFS_ILOCK_SHARED);
-return xfs_attr_remove_int(dp, name, namelen, flags);
+return xfs_attr_remove_int(dp, &xname, flags);
 }
-int /* error */
+STATIC int
 xfs_attr_list_int(xfs_attr_list_context_t *context)
 {
 int error;
@@ -2522,8 +2536,7 @@ attr_generic_get(
 {
 int error, asize = size;
-error = xfs_attr_get(xfs_vtoi(vp), name, data,
+error = xfs_attr_get(xfs_vtoi(vp), name, data, &asize, xflags);
-&asize, xflags, NULL);
 if (!error)
 return asize;
 return -error;
......
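Editor's note: with the refactor above, in-kernel callers no longer pass name/length/cred triples; the entry points convert the name once via xfs_attr_name_to_xname(). A hedged sketch of what a caller of the new xfs_attr_get() prototype looks like (the attribute name, buffer size, and flags value are illustrative):

static int example_read_attr(xfs_inode_t *ip)
{
	char	value[256];
	int	valuelen = sizeof(value);	/* in: buffer size, out: attr length */
	int	error;

	error = xfs_attr_get(ip, "user.comment", value, &valuelen, 0);
	if (error)
		return error;			/* positive XFS-style errno */
	/* value[0..valuelen-1] now holds the attribute data */
	return 0;
}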
@@ -158,14 +158,10 @@ struct xfs_da_args;
 /*
 * Overall external interface routines.
 */
-int xfs_attr_set_int(struct xfs_inode *, const char *, int, char *, int, int);
-int xfs_attr_remove_int(struct xfs_inode *, const char *, int, int);
-int xfs_attr_list_int(struct xfs_attr_list_context *);
 int xfs_attr_inactive(struct xfs_inode *dp);
 int xfs_attr_shortform_getvalue(struct xfs_da_args *);
-int xfs_attr_fetch(struct xfs_inode *, const char *, int,
+int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
-char *, int *, int, struct cred *);
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
 #endif /* __XFS_ATTR_H__ */
@@ -4074,7 +4074,6 @@ xfs_bmap_add_attrfork(
 error2:
 xfs_bmap_cancel(&flist);
 error1:
-ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE));
 xfs_iunlock(ip, XFS_ILOCK_EXCL);
 error0:
 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
......
@@ -162,7 +162,7 @@ xfs_swap_extents(
 ips[1] = ip;
 }
-xfs_lock_inodes(ips, 2, 0, lock_flags);
+xfs_lock_inodes(ips, 2, lock_flags);
 locked = 1;
 /* Verify that both files have the same format */
@@ -265,7 +265,7 @@ xfs_swap_extents(
 locked = 0;
 goto error0;
 }
-xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 /*
 * Count the number of extended attribute blocks
......
@@ -462,7 +462,7 @@ xfs_fs_counts(
 xfs_mount_t *mp,
 xfs_fsop_counts_t *cnt)
 {
-xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
+xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
 spin_lock(&mp->m_sb_lock);
 cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 cnt->freertx = mp->m_sb.sb_frextents;
@@ -524,7 +524,7 @@ xfs_reserve_blocks(
 */
 retry:
 spin_lock(&mp->m_sb_lock);
-xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
+xfs_icsb_sync_counters_locked(mp, 0);
 /*
 * If our previous reservation was larger than the current value,
@@ -552,11 +552,8 @@ xfs_reserve_blocks(
 mp->m_resblks += free;
 mp->m_resblks_avail += free;
 fdblks_delta = -free;
-mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
 } else {
 fdblks_delta = -delta;
-mp->m_sb.sb_fdblocks =
-lcounter + XFS_ALLOC_SET_ASIDE(mp);
 mp->m_resblks = request;
 mp->m_resblks_avail += delta;
 }
@@ -587,7 +584,6 @@ xfs_reserve_blocks(
 if (error == ENOSPC)
 goto retry;
 }
 return 0;
 }
......
@@ -147,6 +147,7 @@ xfs_ialloc_ag_alloc(
 int version; /* inode version number to use */
 int isaligned = 0; /* inode allocation at stripe unit */
 /* boundary */
+unsigned int gen;
 args.tp = tp;
 args.mp = tp->t_mountp;
@@ -290,6 +291,14 @@ xfs_ialloc_ag_alloc(
 else
 version = XFS_DINODE_VERSION_1;
+/*
+* Seed the new inode cluster with a random generation number. This
+* prevents short-term reuse of generation numbers if a chunk is
+* freed and then immediately reallocated. We use random numbers
+* rather than a linear progression to prevent the next generation
+* number from being easily guessable.
+*/
+gen = random32();
 for (j = 0; j < nbufs; j++) {
 /*
 * Get the block.
@@ -309,6 +318,7 @@ xfs_ialloc_ag_alloc(
 free = XFS_MAKE_IPTR(args.mp, fbuf, i);
 free->di_core.di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
 free->di_core.di_version = version;
+free->di_core.di_gen = cpu_to_be32(gen);
 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
 xfs_ialloc_log_di(tp, fbuf, i,
 XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
......
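Editor's note: the generation number seeded above is the same di_gen that the earlier xfs_export.c and xfs_ioctl.c hunks compare against when validating file handles. A hedged sketch (not verbatim kernel code) of that consumer side:

/*
 * A file handle captures (inode number, generation) at lookup time.
 * If the inode is later freed and its number reused, the freshly
 * seeded generation no longer matches, so the stale handle is refused:
 */
if (ip->i_d.di_gen != generation) {
	xfs_iput_new(ip, XFS_ILOCK_SHARED);
	return ERR_PTR(-ENOENT);
}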
@@ -593,7 +593,8 @@ xfs_iunlock_map_shared(
 * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
 void
-xfs_ilock(xfs_inode_t *ip,
+xfs_ilock(
+xfs_inode_t *ip,
 uint lock_flags)
 {
 /*
@@ -607,16 +608,16 @@ xfs_ilock(xfs_inode_t *ip,
 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-if (lock_flags & XFS_IOLOCK_EXCL) {
+if (lock_flags & XFS_IOLOCK_EXCL)
 mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-} else if (lock_flags & XFS_IOLOCK_SHARED) {
+else if (lock_flags & XFS_IOLOCK_SHARED)
 mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-}
-if (lock_flags & XFS_ILOCK_EXCL) {
+if (lock_flags & XFS_ILOCK_EXCL)
 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-} else if (lock_flags & XFS_ILOCK_SHARED) {
+else if (lock_flags & XFS_ILOCK_SHARED)
 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-}
 xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
 }
@@ -631,15 +632,12 @@ xfs_ilock(xfs_inode_t *ip,
 * lock_flags -- this parameter indicates the inode's locks to be
 * to be locked. See the comment for xfs_ilock() for a list
 * of valid values.
-*
 */
 int
-xfs_ilock_nowait(xfs_inode_t *ip,
+xfs_ilock_nowait(
+xfs_inode_t *ip,
 uint lock_flags)
 {
-int iolocked;
-int ilocked;
 /*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
@@ -651,37 +649,30 @@ xfs_ilock_nowait(xfs_inode_t *ip,
 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-iolocked = 0;
 if (lock_flags & XFS_IOLOCK_EXCL) {
-iolocked = mrtryupdate(&ip->i_iolock);
+if (!mrtryupdate(&ip->i_iolock))
-if (!iolocked) {
+goto out;
-return 0;
-}
 } else if (lock_flags & XFS_IOLOCK_SHARED) {
-iolocked = mrtryaccess(&ip->i_iolock);
+if (!mrtryaccess(&ip->i_iolock))
-if (!iolocked) {
+goto out;
-return 0;
-}
 }
 if (lock_flags & XFS_ILOCK_EXCL) {
-ilocked = mrtryupdate(&ip->i_lock);
+if (!mrtryupdate(&ip->i_lock))
-if (!ilocked) {
+goto out_undo_iolock;
-if (iolocked) {
-mrunlock(&ip->i_iolock);
-}
-return 0;
-}
 } else if (lock_flags & XFS_ILOCK_SHARED) {
-ilocked = mrtryaccess(&ip->i_lock);
+if (!mrtryaccess(&ip->i_lock))
-if (!ilocked) {
+goto out_undo_iolock;
-if (iolocked) {
-mrunlock(&ip->i_iolock);
-}
-return 0;
-}
 }
 xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
 return 1;
+out_undo_iolock:
+if (lock_flags & XFS_IOLOCK_EXCL)
+mrunlock_excl(&ip->i_iolock);
+else if (lock_flags & XFS_IOLOCK_SHARED)
+mrunlock_shared(&ip->i_iolock);
+out:
+return 0;
 }
 /*
@@ -697,7 +688,8 @@ xfs_ilock_nowait(xfs_inode_t *ip,
 *
 */
 void
-xfs_iunlock(xfs_inode_t *ip,
+xfs_iunlock(
+xfs_inode_t *ip,
 uint lock_flags)
 {
 /*
@@ -713,32 +705,26 @@ xfs_iunlock(xfs_inode_t *ip,
 XFS_LOCK_DEP_MASK)) == 0);
 ASSERT(lock_flags != 0);
-if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
+if (lock_flags & XFS_IOLOCK_EXCL)
-ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
+mrunlock_excl(&ip->i_iolock);
-(ismrlocked(&ip->i_iolock, MR_ACCESS)));
+else if (lock_flags & XFS_IOLOCK_SHARED)
-ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
+mrunlock_shared(&ip->i_iolock);
-(ismrlocked(&ip->i_iolock, MR_UPDATE)));
-mrunlock(&ip->i_iolock);
-}
-if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
+if (lock_flags & XFS_ILOCK_EXCL)
-ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
+mrunlock_excl(&ip->i_lock);
-(ismrlocked(&ip->i_lock, MR_ACCESS)));
+else if (lock_flags & XFS_ILOCK_SHARED)
-ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
+mrunlock_shared(&ip->i_lock);
-(ismrlocked(&ip->i_lock, MR_UPDATE)));
-mrunlock(&ip->i_lock);
+if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
+!(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
 /*
 * Let the AIL know that this item has been unlocked in case
 * it is in the AIL and anyone is waiting on it. Don't do
 * this if the caller has asked us not to.
 */
-if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
-ip->i_itemp != NULL) {
 xfs_trans_unlocked_item(ip->i_mount,
 (xfs_log_item_t*)(ip->i_itemp));
 }
-}
 xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
 }
@@ -747,21 +733,47 @@ xfs_iunlock(xfs_inode_t *ip,
 * if it is being demoted.
 */
 void
-xfs_ilock_demote(xfs_inode_t *ip,
+xfs_ilock_demote(
+xfs_inode_t *ip,
 uint lock_flags)
 {
 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
 ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
-if (lock_flags & XFS_ILOCK_EXCL) {
+if (lock_flags & XFS_ILOCK_EXCL)
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
 mrdemote(&ip->i_lock);
-}
+if (lock_flags & XFS_IOLOCK_EXCL)
-if (lock_flags & XFS_IOLOCK_EXCL) {
-ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
 mrdemote(&ip->i_iolock);
-}
+}
+#ifdef DEBUG
+/*
+* Debug-only routine, without additional rw_semaphore APIs, we can
+* now only answer requests regarding whether we hold the lock for write
+* (reader state is outside our visibility, we only track writer state).
+*
+* Note: this means !xfs_isilocked would give false positives, so don't do that.
+*/
+int
+xfs_isilocked(
+xfs_inode_t *ip,
+uint lock_flags)
+{
+if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
+XFS_ILOCK_EXCL) {
+if (!ip->i_lock.mr_writer)
+return 0;
 }
+if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
+XFS_IOLOCK_EXCL) {
+if (!ip->i_iolock.mr_writer)
+return 0;
+}
+return 1;
 }
+#endif
 /*
 * The following three routines simply manage the i_flock
......
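Editor's note: per the comment above, xfs_isilocked() only tracks writer state, so it is only meaningful for asserting that a lock *is* held. A hedged sketch of the intended usage, mirroring the ASSERT conversions elsewhere in this series:

ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));			/* held exclusively */
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));	/* held at all */
/*
 * Avoid ASSERT(!xfs_isilocked(...)): shared holders are not tracked, so a
 * negated check can pass or fail for the wrong reasons.
 */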
@@ -1291,7 +1291,7 @@ xfs_file_last_byte(
 xfs_fileoff_t size_last_block;
 int error;
-ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
+ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
 mp = ip->i_mount;
 /*
@@ -1402,7 +1402,7 @@ xfs_itruncate_start(
 bhv_vnode_t *vp;
 int error = 0;
-ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
+ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 ASSERT((new_size == 0) || (new_size <= ip->i_size));
 ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
 (flags == XFS_ITRUNC_MAYBE));
@@ -1528,8 +1528,7 @@ xfs_itruncate_finish(
 xfs_bmap_free_t free_list;
 int error;
-ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
 ASSERT((new_size == 0) || (new_size <= ip->i_size));
 ASSERT(*tp != NULL);
 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -1780,8 +1779,7 @@ xfs_igrow_start(
 xfs_fsize_t new_size,
 cred_t *credp)
 {
-ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
 ASSERT(new_size > ip->i_size);
 /*
@@ -1809,8 +1807,7 @@ xfs_igrow_finish(
 xfs_fsize_t new_size,
 int change_flag)
 {
-ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
 ASSERT(ip->i_transp == tp);
 ASSERT(new_size > ip->i_size);
@@ -2287,7 +2284,7 @@ xfs_ifree(
 xfs_dinode_t *dip;
 xfs_buf_t *ibp;
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 ASSERT(ip->i_transp == tp);
 ASSERT(ip->i_d.di_nlink == 0);
 ASSERT(ip->i_d.di_nextents == 0);
@@ -2746,7 +2743,7 @@ void
 xfs_ipin(
 xfs_inode_t *ip)
 {
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 atomic_inc(&ip->i_pincount);
 }
@@ -2779,7 +2776,7 @@ __xfs_iunpin_wait(
 {
 xfs_inode_log_item_t *iip = ip->i_itemp;
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 if (atomic_read(&ip->i_pincount) == 0)
 return;
@@ -2829,7 +2826,7 @@ xfs_iextents_copy(
 xfs_fsblock_t start_block;
 ifp = XFS_IFORK_PTR(ip, whichfork);
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 ASSERT(ifp->if_bytes > 0);
 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
@@ -3132,7 +3129,7 @@ xfs_iflush(
 XFS_STATS_INC(xs_iflush_count);
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 ASSERT(issemalocked(&(ip->i_flock)));
 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 ip->i_d.di_nextents > ip->i_df.if_ext_max);
@@ -3297,7 +3294,7 @@ xfs_iflush_int(
 int first;
 #endif
-ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 ASSERT(issemalocked(&(ip->i_flock)));
 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 ip->i_d.di_nextents > ip->i_df.if_ext_max);
......
@@ -386,20 +386,9 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
 #define XFS_ILOCK_EXCL (1<<2)
 #define XFS_ILOCK_SHARED (1<<3)
 #define XFS_IUNLOCK_NONOTIFY (1<<4)
-/* #define XFS_IOLOCK_NESTED (1<<5) */
-#define XFS_EXTENT_TOKEN_RD (1<<6)
-#define XFS_SIZE_TOKEN_RD (1<<7)
-#define XFS_EXTSIZE_RD (XFS_EXTENT_TOKEN_RD|XFS_SIZE_TOKEN_RD)
-#define XFS_WILLLEND (1<<8) /* Always acquire tokens for lending */
-#define XFS_EXTENT_TOKEN_WR (XFS_EXTENT_TOKEN_RD | XFS_WILLLEND)
-#define XFS_SIZE_TOKEN_WR (XFS_SIZE_TOKEN_RD | XFS_WILLLEND)
-#define XFS_EXTSIZE_WR (XFS_EXTSIZE_RD | XFS_WILLLEND)
-/* TODO:XFS_SIZE_TOKEN_WANT (1<<9) */
 #define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
-| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \
+| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
-| XFS_EXTENT_TOKEN_RD | XFS_SIZE_TOKEN_RD \
-| XFS_WILLLEND)
 /*
 * Flags for lockdep annotations.
@@ -483,6 +472,7 @@ void xfs_ilock(xfs_inode_t *, uint);
 int xfs_ilock_nowait(xfs_inode_t *, uint);
 void xfs_iunlock(xfs_inode_t *, uint);
 void xfs_ilock_demote(xfs_inode_t *, uint);
+int xfs_isilocked(xfs_inode_t *, uint);
 void xfs_iflock(xfs_inode_t *);
 int xfs_iflock_nowait(xfs_inode_t *);
 uint xfs_ilock_map_shared(xfs_inode_t *);
@@ -534,7 +524,7 @@ int xfs_iflush(xfs_inode_t *, uint);
 void xfs_iflush_all(struct xfs_mount *);
 void xfs_ichgtime(xfs_inode_t *, int);
 xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
-void xfs_lock_inodes(xfs_inode_t **, int, int, uint);
+void xfs_lock_inodes(xfs_inode_t **, int, uint);
 void xfs_synchronize_atime(xfs_inode_t *);
 void xfs_mark_inode_dirty_sync(xfs_inode_t *);
......
...@@ -547,7 +547,7 @@ STATIC void ...@@ -547,7 +547,7 @@ STATIC void
xfs_inode_item_pin( xfs_inode_item_pin(
xfs_inode_log_item_t *iip) xfs_inode_log_item_t *iip)
{ {
ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
xfs_ipin(iip->ili_inode); xfs_ipin(iip->ili_inode);
} }
...@@ -664,13 +664,13 @@ xfs_inode_item_unlock( ...@@ -664,13 +664,13 @@ xfs_inode_item_unlock(
ASSERT(iip != NULL); ASSERT(iip != NULL);
ASSERT(iip->ili_inode->i_itemp != NULL); ASSERT(iip->ili_inode->i_itemp != NULL);
ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
ASSERT((!(iip->ili_inode->i_itemp->ili_flags & ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
XFS_ILI_IOLOCKED_EXCL)) || XFS_ILI_IOLOCKED_EXCL)) ||
ismrlocked(&(iip->ili_inode->i_iolock), MR_UPDATE)); xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL));
ASSERT((!(iip->ili_inode->i_itemp->ili_flags & ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
XFS_ILI_IOLOCKED_SHARED)) || XFS_ILI_IOLOCKED_SHARED)) ||
ismrlocked(&(iip->ili_inode->i_iolock), MR_ACCESS)); xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED));
/* /*
* Clear the transaction pointer in the inode. * Clear the transaction pointer in the inode.
*/ */
...@@ -769,7 +769,7 @@ xfs_inode_item_pushbuf( ...@@ -769,7 +769,7 @@ xfs_inode_item_pushbuf(
ip = iip->ili_inode; ip = iip->ili_inode;
ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
/* /*
* The ili_pushbuf_flag keeps others from * The ili_pushbuf_flag keeps others from
...@@ -857,7 +857,7 @@ xfs_inode_item_push( ...@@ -857,7 +857,7 @@ xfs_inode_item_push(
ip = iip->ili_inode; ip = iip->ili_inode;
ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
ASSERT(issemalocked(&(ip->i_flock))); ASSERT(issemalocked(&(ip->i_flock)));
/* /*
* Since we were able to lock the inode's flush lock and * Since we were able to lock the inode's flush lock and
......
...@@ -196,14 +196,14 @@ xfs_iomap( ...@@ -196,14 +196,14 @@ xfs_iomap(
break; break;
case BMAPI_WRITE: case BMAPI_WRITE:
xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count); xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count);
lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; lockmode = XFS_ILOCK_EXCL;
if (flags & BMAPI_IGNSTATE) if (flags & BMAPI_IGNSTATE)
bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
xfs_ilock(ip, lockmode); xfs_ilock(ip, lockmode);
break; break;
case BMAPI_ALLOCATE: case BMAPI_ALLOCATE:
xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count); xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count);
lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; lockmode = XFS_ILOCK_SHARED;
bmapi_flags = XFS_BMAPI_ENTIRE; bmapi_flags = XFS_BMAPI_ENTIRE;
/* Attempt non-blocking lock */ /* Attempt non-blocking lock */
...@@ -523,8 +523,7 @@ xfs_iomap_write_direct( ...@@ -523,8 +523,7 @@ xfs_iomap_write_direct(
goto error_out; goto error_out;
} }
if (unlikely(!imap.br_startblock && if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) {
!(XFS_IS_REALTIME_INODE(ip)))) {
error = xfs_cmn_err_fsblock_zero(ip, &imap); error = xfs_cmn_err_fsblock_zero(ip, &imap);
goto error_out; goto error_out;
} }
...@@ -624,7 +623,7 @@ xfs_iomap_write_delay( ...@@ -624,7 +623,7 @@ xfs_iomap_write_delay(
int prealloc, fsynced = 0; int prealloc, fsynced = 0;
int error; int error;
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
/* /*
* Make sure that the dquots are there. This doesn't hold * Make sure that the dquots are there. This doesn't hold
...@@ -686,8 +685,7 @@ xfs_iomap_write_delay( ...@@ -686,8 +685,7 @@ xfs_iomap_write_delay(
goto retry; goto retry;
} }
if (unlikely(!imap[0].br_startblock && if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
!(XFS_IS_REALTIME_INODE(ip))))
return xfs_cmn_err_fsblock_zero(ip, &imap[0]); return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
*ret_imap = imap[0]; *ret_imap = imap[0];
...@@ -838,9 +836,9 @@ xfs_iomap_write_allocate( ...@@ -838,9 +836,9 @@ xfs_iomap_write_allocate(
* See if we were able to allocate an extent that * See if we were able to allocate an extent that
* covers at least part of the callers request * covers at least part of the callers request
*/ */
if (unlikely(!imap.br_startblock && if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
XFS_IS_REALTIME_INODE(ip)))
return xfs_cmn_err_fsblock_zero(ip, &imap); return xfs_cmn_err_fsblock_zero(ip, &imap);
if ((offset_fsb >= imap.br_startoff) && if ((offset_fsb >= imap.br_startoff) &&
(offset_fsb < (imap.br_startoff + (offset_fsb < (imap.br_startoff +
imap.br_blockcount))) { imap.br_blockcount))) {
...@@ -934,8 +932,7 @@ xfs_iomap_write_unwritten( ...@@ -934,8 +932,7 @@ xfs_iomap_write_unwritten(
if (error) if (error)
return XFS_ERROR(error); return XFS_ERROR(error);
if (unlikely(!imap.br_startblock && if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
!(XFS_IS_REALTIME_INODE(ip))))
return xfs_cmn_err_fsblock_zero(ip, &imap); return xfs_cmn_err_fsblock_zero(ip, &imap);
if ((numblks_fsb = imap.br_blockcount) == 0) { if ((numblks_fsb = imap.br_blockcount) == 0) {
......
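Most of the block-zero checks above are rewritten from the form !a && !b to the equivalent !(a || b) (De Morgan), dropping the unlikely() annotation along the way; note that in the xfs_iomap_write_allocate() hunk the old test lacked the negation on the realtime check, so that one is a behavioural fix rather than a pure restyling. A throwaway, standalone check of the equivalence over all boolean inputs (nothing XFS-specific, the variable names are placeholders):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Exhaustively confirm De Morgan: !a && !b  <=>  !(a || b). */
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			assert((!a && !b) == !(a || b));

	printf("!a && !b is equivalent to !(a || b) for all boolean a, b\n");
	return 0;
}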
...@@ -71,11 +71,6 @@ xfs_bulkstat_one_iget( ...@@ -71,11 +71,6 @@ xfs_bulkstat_one_iget(
ASSERT(ip != NULL); ASSERT(ip != NULL);
ASSERT(ip->i_blkno != (xfs_daddr_t)0); ASSERT(ip->i_blkno != (xfs_daddr_t)0);
if (ip->i_d.di_mode == 0) {
*stat = BULKSTAT_RV_NOTHING;
error = XFS_ERROR(ENOENT);
goto out_iput;
}
vp = XFS_ITOV(ip); vp = XFS_ITOV(ip);
dic = &ip->i_d; dic = &ip->i_d;
...@@ -124,7 +119,6 @@ xfs_bulkstat_one_iget( ...@@ -124,7 +119,6 @@ xfs_bulkstat_one_iget(
break; break;
} }
out_iput:
xfs_iput(ip, XFS_ILOCK_SHARED); xfs_iput(ip, XFS_ILOCK_SHARED);
return error; return error;
} }
......
...@@ -54,8 +54,9 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *); ...@@ -54,8 +54,9 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *);
#ifdef HAVE_PERCPU_SB #ifdef HAVE_PERCPU_SB
STATIC void xfs_icsb_destroy_counters(xfs_mount_t *); STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
int, int); int);
STATIC void xfs_icsb_sync_counters(xfs_mount_t *); STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
int);
STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
int64_t, int); int64_t, int);
STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
...@@ -63,8 +64,8 @@ STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); ...@@ -63,8 +64,8 @@ STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else #else
#define xfs_icsb_destroy_counters(mp) do { } while (0) #define xfs_icsb_destroy_counters(mp) do { } while (0)
#define xfs_icsb_balance_counter(mp, a, b, c) do { } while (0) #define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
#define xfs_icsb_sync_counters(mp) do { } while (0) #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0) #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
#endif #endif
...@@ -1400,7 +1401,7 @@ xfs_log_sbcount( ...@@ -1400,7 +1401,7 @@ xfs_log_sbcount(
if (!xfs_fs_writable(mp)) if (!xfs_fs_writable(mp))
return 0; return 0;
xfs_icsb_sync_counters(mp); xfs_icsb_sync_counters(mp, 0);
/* /*
* we don't need to do this if we are updating the superblock * we don't need to do this if we are updating the superblock
...@@ -2026,9 +2027,9 @@ xfs_icsb_cpu_notify( ...@@ -2026,9 +2027,9 @@ xfs_icsb_cpu_notify(
case CPU_ONLINE: case CPU_ONLINE:
case CPU_ONLINE_FROZEN: case CPU_ONLINE_FROZEN:
xfs_icsb_lock(mp); xfs_icsb_lock(mp);
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
xfs_icsb_unlock(mp); xfs_icsb_unlock(mp);
break; break;
case CPU_DEAD: case CPU_DEAD:
...@@ -2048,12 +2049,9 @@ xfs_icsb_cpu_notify( ...@@ -2048,12 +2049,9 @@ xfs_icsb_cpu_notify(
memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
XFS_ICSB_SB_LOCKED, 0); xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
XFS_ICSB_SB_LOCKED, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
XFS_ICSB_SB_LOCKED, 0);
spin_unlock(&mp->m_sb_lock); spin_unlock(&mp->m_sb_lock);
xfs_icsb_unlock(mp); xfs_icsb_unlock(mp);
break; break;
...@@ -2105,9 +2103,9 @@ xfs_icsb_reinit_counters( ...@@ -2105,9 +2103,9 @@ xfs_icsb_reinit_counters(
* initial balance kicks us off correctly * initial balance kicks us off correctly
*/ */
mp->m_icsb_counters = -1; mp->m_icsb_counters = -1;
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
xfs_icsb_unlock(mp); xfs_icsb_unlock(mp);
} }
...@@ -2223,7 +2221,7 @@ xfs_icsb_disable_counter( ...@@ -2223,7 +2221,7 @@ xfs_icsb_disable_counter(
if (!test_and_set_bit(field, &mp->m_icsb_counters)) { if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
/* drain back to superblock */ /* drain back to superblock */
xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT); xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
switch(field) { switch(field) {
case XFS_SBS_ICOUNT: case XFS_SBS_ICOUNT:
mp->m_sb.sb_icount = cnt.icsb_icount; mp->m_sb.sb_icount = cnt.icsb_icount;
...@@ -2278,38 +2276,33 @@ xfs_icsb_enable_counter( ...@@ -2278,38 +2276,33 @@ xfs_icsb_enable_counter(
} }
void void
xfs_icsb_sync_counters_flags( xfs_icsb_sync_counters_locked(
xfs_mount_t *mp, xfs_mount_t *mp,
int flags) int flags)
{ {
xfs_icsb_cnts_t cnt; xfs_icsb_cnts_t cnt;
/* Pass 1: lock all counters */
if ((flags & XFS_ICSB_SB_LOCKED) == 0)
spin_lock(&mp->m_sb_lock);
xfs_icsb_count(mp, &cnt, flags); xfs_icsb_count(mp, &cnt, flags);
/* Step 3: update mp->m_sb fields */
if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
mp->m_sb.sb_icount = cnt.icsb_icount; mp->m_sb.sb_icount = cnt.icsb_icount;
if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
mp->m_sb.sb_ifree = cnt.icsb_ifree; mp->m_sb.sb_ifree = cnt.icsb_ifree;
if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
if ((flags & XFS_ICSB_SB_LOCKED) == 0)
spin_unlock(&mp->m_sb_lock);
} }
/* /*
* Accurate update of per-cpu counters to incore superblock * Accurate update of per-cpu counters to incore superblock
*/ */
STATIC void void
xfs_icsb_sync_counters( xfs_icsb_sync_counters(
xfs_mount_t *mp) xfs_mount_t *mp,
int flags)
{ {
xfs_icsb_sync_counters_flags(mp, 0); spin_lock(&mp->m_sb_lock);
xfs_icsb_sync_counters_locked(mp, flags);
spin_unlock(&mp->m_sb_lock);
} }
/* /*
...@@ -2332,19 +2325,15 @@ xfs_icsb_sync_counters( ...@@ -2332,19 +2325,15 @@ xfs_icsb_sync_counters(
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void STATIC void
xfs_icsb_balance_counter( xfs_icsb_balance_counter_locked(
xfs_mount_t *mp, xfs_mount_t *mp,
xfs_sb_field_t field, xfs_sb_field_t field,
int flags,
int min_per_cpu) int min_per_cpu)
{ {
uint64_t count, resid; uint64_t count, resid;
int weight = num_online_cpus(); int weight = num_online_cpus();
uint64_t min = (uint64_t)min_per_cpu; uint64_t min = (uint64_t)min_per_cpu;
if (!(flags & XFS_ICSB_SB_LOCKED))
spin_lock(&mp->m_sb_lock);
/* disable counter and sync counter */ /* disable counter and sync counter */
xfs_icsb_disable_counter(mp, field); xfs_icsb_disable_counter(mp, field);
...@@ -2354,19 +2343,19 @@ xfs_icsb_balance_counter( ...@@ -2354,19 +2343,19 @@ xfs_icsb_balance_counter(
count = mp->m_sb.sb_icount; count = mp->m_sb.sb_icount;
resid = do_div(count, weight); resid = do_div(count, weight);
if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
goto out; return;
break; break;
case XFS_SBS_IFREE: case XFS_SBS_IFREE:
count = mp->m_sb.sb_ifree; count = mp->m_sb.sb_ifree;
resid = do_div(count, weight); resid = do_div(count, weight);
if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
goto out; return;
break; break;
case XFS_SBS_FDBLOCKS: case XFS_SBS_FDBLOCKS:
count = mp->m_sb.sb_fdblocks; count = mp->m_sb.sb_fdblocks;
resid = do_div(count, weight); resid = do_div(count, weight);
if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
goto out; return;
break; break;
default: default:
BUG(); BUG();
...@@ -2375,8 +2364,16 @@ xfs_icsb_balance_counter( ...@@ -2375,8 +2364,16 @@ xfs_icsb_balance_counter(
} }
xfs_icsb_enable_counter(mp, field, count, resid); xfs_icsb_enable_counter(mp, field, count, resid);
out: }
if (!(flags & XFS_ICSB_SB_LOCKED))
STATIC void
xfs_icsb_balance_counter(
xfs_mount_t *mp,
xfs_sb_field_t fields,
int min_per_cpu)
{
spin_lock(&mp->m_sb_lock);
xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
spin_unlock(&mp->m_sb_lock); spin_unlock(&mp->m_sb_lock);
} }
...@@ -2484,7 +2481,7 @@ xfs_icsb_modify_counters( ...@@ -2484,7 +2481,7 @@ xfs_icsb_modify_counters(
* we are done. * we are done.
*/ */
if (ret != ENOSPC) if (ret != ENOSPC)
xfs_icsb_balance_counter(mp, field, 0, 0); xfs_icsb_balance_counter(mp, field, 0);
xfs_icsb_unlock(mp); xfs_icsb_unlock(mp);
return ret; return ret;
...@@ -2508,7 +2505,7 @@ xfs_icsb_modify_counters( ...@@ -2508,7 +2505,7 @@ xfs_icsb_modify_counters(
* will either succeed through the fast path or slow path without * will either succeed through the fast path or slow path without
* another balance operation being required. * another balance operation being required.
*/ */
xfs_icsb_balance_counter(mp, field, 0, delta); xfs_icsb_balance_counter(mp, field, delta);
xfs_icsb_unlock(mp); xfs_icsb_unlock(mp);
goto again; goto again;
} }
......
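The counter code above is split into foo()/foo_locked() pairs: xfs_icsb_sync_counters() and xfs_icsb_balance_counter() now take m_sb_lock themselves and call the *_locked variants, while callers that already hold m_sb_lock (such as the CPU hotplug path earlier in the hunk) call the *_locked form directly. A minimal user-space analogue of this pattern, with invented names and a pthread mutex standing in for m_sb_lock, compiled with -pthread:

#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative foo()/foo_locked() split: the _locked variant assumes the
 * caller already holds the lock; the plain variant is a thin wrapper that
 * takes and drops it around the real work.
 */
static pthread_mutex_t demo_sb_lock = PTHREAD_MUTEX_INITIALIZER;
static long demo_sb_counter;

static void demo_sync_counters_locked(long delta)
{
	/* caller must hold demo_sb_lock */
	demo_sb_counter += delta;
}

static void demo_sync_counters(long delta)
{
	pthread_mutex_lock(&demo_sb_lock);
	demo_sync_counters_locked(delta);
	pthread_mutex_unlock(&demo_sb_lock);
}

int main(void)
{
	/* Normal caller: use the wrapper. */
	demo_sync_counters(5);

	/*
	 * Caller that already holds the lock across several operations
	 * (as the CPU_DEAD path above does with m_sb_lock): call the
	 * _locked variant directly to avoid self-deadlock.
	 */
	pthread_mutex_lock(&demo_sb_lock);
	demo_sync_counters_locked(1);
	demo_sync_counters_locked(2);
	pthread_mutex_unlock(&demo_sb_lock);

	printf("counter = %ld\n", demo_sb_counter);
	return 0;
}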
...@@ -206,17 +206,18 @@ typedef struct xfs_icsb_cnts { ...@@ -206,17 +206,18 @@ typedef struct xfs_icsb_cnts {
#define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */ #define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */
#define XFS_ICSB_SB_LOCKED (1 << 0) /* sb already locked */
#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */ #define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */
extern int xfs_icsb_init_counters(struct xfs_mount *); extern int xfs_icsb_init_counters(struct xfs_mount *);
extern void xfs_icsb_reinit_counters(struct xfs_mount *); extern void xfs_icsb_reinit_counters(struct xfs_mount *);
extern void xfs_icsb_sync_counters_flags(struct xfs_mount *, int); extern void xfs_icsb_sync_counters(struct xfs_mount *, int);
extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
#else #else
#define xfs_icsb_init_counters(mp) (0) #define xfs_icsb_init_counters(mp) (0)
#define xfs_icsb_reinit_counters(mp) do { } while (0) #define xfs_icsb_reinit_counters(mp) do { } while (0)
#define xfs_icsb_sync_counters_flags(mp, flags) do { } while (0) #define xfs_icsb_sync_counters(mp, flags) do { } while (0)
#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
#endif #endif
typedef struct xfs_ail { typedef struct xfs_ail {
......
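For builds without per-cpu superblock counters, the header above stubs the new entry points out as do { } while (0) macros, the usual C idiom for a statement-like no-op that still requires a trailing semicolon and nests safely inside if/else. A standalone illustration with an invented name:

#include <stdio.h>

/*
 * A no-op stub in the do { } while (0) style: it expands to exactly one
 * statement, so call sites behave the same whether or not the real
 * implementation is compiled in.
 */
#define demo_sync_counters(mp, flags)	do { } while (0)

int main(void)
{
	int have_percpu_sb = 0;

	if (have_percpu_sb)
		demo_sync_counters(NULL, 0);	/* needs the ';' like a real call */
	else
		printf("per-cpu counters disabled; stub compiles to nothing\n");

	return 0;
}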
...@@ -111,13 +111,13 @@ xfs_trans_iget( ...@@ -111,13 +111,13 @@ xfs_trans_iget(
*/ */
ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp != NULL);
ASSERT(lock_flags & XFS_ILOCK_EXCL); ASSERT(lock_flags & XFS_ILOCK_EXCL);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
ismrlocked(&ip->i_iolock, MR_UPDATE)); xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
(ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL)); (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS))); xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
(ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY)); (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));
...@@ -185,7 +185,7 @@ xfs_trans_ijoin( ...@@ -185,7 +185,7 @@ xfs_trans_ijoin(
xfs_inode_log_item_t *iip; xfs_inode_log_item_t *iip;
ASSERT(ip->i_transp == NULL); ASSERT(ip->i_transp == NULL);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(lock_flags & XFS_ILOCK_EXCL); ASSERT(lock_flags & XFS_ILOCK_EXCL);
if (ip->i_itemp == NULL) if (ip->i_itemp == NULL)
xfs_inode_item_init(ip, ip->i_mount); xfs_inode_item_init(ip, ip->i_mount);
...@@ -232,7 +232,7 @@ xfs_trans_ihold( ...@@ -232,7 +232,7 @@ xfs_trans_ihold(
{ {
ASSERT(ip->i_transp == tp); ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp != NULL);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ip->i_itemp->ili_flags |= XFS_ILI_HOLD; ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
} }
...@@ -257,7 +257,7 @@ xfs_trans_log_inode( ...@@ -257,7 +257,7 @@ xfs_trans_log_inode(
ASSERT(ip->i_transp == tp); ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp != NULL);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp)); lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
ASSERT(lidp != NULL); ASSERT(lidp != NULL);
......
...@@ -41,49 +41,6 @@ ...@@ -41,49 +41,6 @@
#include "xfs_utils.h" #include "xfs_utils.h"
int
xfs_dir_lookup_int(
xfs_inode_t *dp,
uint lock_mode,
struct xfs_name *name,
xfs_ino_t *inum,
xfs_inode_t **ipp)
{
int error;
xfs_itrace_entry(dp);
error = xfs_dir_lookup(NULL, dp, name, inum);
if (!error) {
/*
* Unlock the directory. We do this because we can't
* hold the directory lock while doing the vn_get()
* in xfs_iget(). Doing so could cause us to hold
* a lock while waiting for the inode to finish
* being inactive while it's waiting for a log
* reservation in the inactive routine.
*/
xfs_iunlock(dp, lock_mode);
error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0);
xfs_ilock(dp, lock_mode);
if (error) {
*ipp = NULL;
} else if ((*ipp)->i_d.di_mode == 0) {
/*
* The inode has been freed. Something is
* wrong so just get out of here.
*/
xfs_iunlock(dp, lock_mode);
xfs_iput_new(*ipp, 0);
*ipp = NULL;
xfs_ilock(dp, lock_mode);
error = XFS_ERROR(ENOENT);
}
}
return error;
}
/* /*
* Allocates a new inode from disk and return a pointer to the * Allocates a new inode from disk and return a pointer to the
* incore copy. This routine will internally commit the current * incore copy. This routine will internally commit the current
...@@ -310,7 +267,7 @@ xfs_bump_ino_vers2( ...@@ -310,7 +267,7 @@ xfs_bump_ino_vers2(
{ {
xfs_mount_t *mp; xfs_mount_t *mp;
ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1);
ip->i_d.di_version = XFS_DINODE_VERSION_2; ip->i_d.di_version = XFS_DINODE_VERSION_2;
......
...@@ -21,8 +21,6 @@ ...@@ -21,8 +21,6 @@
#define IRELE(ip) VN_RELE(XFS_ITOV(ip)) #define IRELE(ip) VN_RELE(XFS_ITOV(ip))
#define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip))
extern int xfs_dir_lookup_int(xfs_inode_t *, uint, struct xfs_name *,
xfs_ino_t *, xfs_inode_t **);
extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
xfs_dev_t, cred_t *, prid_t, int, xfs_dev_t, cred_t *, prid_t, int,
......
...@@ -186,6 +186,7 @@ xfs_cleanup(void) ...@@ -186,6 +186,7 @@ xfs_cleanup(void)
kmem_zone_destroy(xfs_efi_zone); kmem_zone_destroy(xfs_efi_zone);
kmem_zone_destroy(xfs_ifork_zone); kmem_zone_destroy(xfs_ifork_zone);
kmem_zone_destroy(xfs_ili_zone); kmem_zone_destroy(xfs_ili_zone);
kmem_zone_destroy(xfs_log_ticket_zone);
} }
/* /*
......
...@@ -15,7 +15,6 @@ struct xfs_iomap; ...@@ -15,7 +15,6 @@ struct xfs_iomap;
int xfs_open(struct xfs_inode *ip); int xfs_open(struct xfs_inode *ip);
int xfs_getattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags);
int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags,
struct cred *credp); struct cred *credp);
int xfs_readlink(struct xfs_inode *ip, char *link); int xfs_readlink(struct xfs_inode *ip, char *link);
...@@ -48,9 +47,9 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd, ...@@ -48,9 +47,9 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd,
struct cred *credp, int attr_flags); struct cred *credp, int attr_flags);
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
struct xfs_inode *src_ip, struct xfs_inode *target_dp, struct xfs_inode *src_ip, struct xfs_inode *target_dp,
struct xfs_name *target_name); struct xfs_name *target_name, struct xfs_inode *target_ip);
int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value, int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
int *valuelenp, int flags, cred_t *cred); int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value, int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
int valuelen, int flags); int valuelen, int flags);
int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags); int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags);
...@@ -61,9 +60,6 @@ int xfs_ioctl(struct xfs_inode *ip, struct file *filp, ...@@ -61,9 +60,6 @@ int xfs_ioctl(struct xfs_inode *ip, struct file *filp,
ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb, ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb,
const struct iovec *iovp, unsigned int segs, const struct iovec *iovp, unsigned int segs,
loff_t *offset, int ioflags); loff_t *offset, int ioflags);
ssize_t xfs_sendfile(struct xfs_inode *ip, struct file *filp,
loff_t *offset, int ioflags, size_t count,
read_actor_t actor, void *target);
ssize_t xfs_splice_read(struct xfs_inode *ip, struct file *infilp, ssize_t xfs_splice_read(struct xfs_inode *ip, struct file *infilp,
loff_t *ppos, struct pipe_inode_info *pipe, size_t count, loff_t *ppos, struct pipe_inode_info *pipe, size_t count,
int flags, int ioflags); int flags, int ioflags);
......