Commit 07c65489 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Nathan Scott

[XFS] Separate the quota source into its own subdirectory ala dmapi.

Push a bunch of quota- and dmapi-specific code down into these
subdirs which previously was compiled into the core XFS code,
and don't descend into these subdirs if options config'd off.

SGI Modid: 2.5.x-xfs:slinx:141850a
parent 07f08be5
......@@ -29,8 +29,6 @@
#
# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
#
# Makefile for XFS on Linux.
#
EXTRA_CFLAGS += -Ifs/xfs -funsigned-char
......@@ -44,18 +42,22 @@ endif
obj-$(CONFIG_XFS_FS) += xfs.o
xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
xfs_dquot.o \
xfs_dquot_item.o \
xfs_trans_dquot.o \
xfs_qm_syscalls.o \
xfs_qmops.o \
xfs_qm.o
xfs_qm_bhv.o \
xfs_qm.o)
ifeq ($(CONFIG_XFS_QUOTA),y)
xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
endif
xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_FS_POSIX_CAP) += xfs_cap.o
xfs-$(CONFIG_FS_POSIX_MAC) += xfs_mac.o
xfs-$(CONFIG_XFS_POSIX_CAP) += xfs_cap.o
xfs-$(CONFIG_XFS_POSIX_MAC) += xfs_mac.o
xfs-$(CONFIG_PROC_FS) += linux/xfs_stats.o
xfs-$(CONFIG_SYSCTL) += linux/xfs_sysctl.o
......@@ -137,6 +139,10 @@ xfs-y += $(addprefix support/, \
qsort.o \
uuid.o)
# Quota and DMAPI stubs
xfs-y += xfs_dmops.o \
xfs_qmops.o
# If both xfs and kdb modules are built in then xfsidbg is built in. If xfs is
# a module and kdb modules are being compiled then xfsidbg must be a module, to
# follow xfs. If xfs is built in then xfsidbg tracks the kdb module state.
......
......@@ -299,7 +299,9 @@ linvfs_file_mmap(
int error;
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
error = -xfs_dm_send_mmap_event(vma, 0);
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = -XFS_SEND_MMAP(mp, vma, 0);
if (error)
return error;
}
......@@ -345,8 +347,10 @@ linvfs_mprotect(
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
if ((vma->vm_flags & VM_MAYSHARE) &&
(newflags & PROT_WRITE) && !(vma->vm_flags & PROT_WRITE)){
error = xfs_dm_send_mmap_event(vma, VM_WRITE);
(newflags & PROT_WRITE) && !(vma->vm_flags & PROT_WRITE)) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
}
}
return error;
......
......@@ -58,17 +58,7 @@ spinlock_t Atomic_spin = SPIN_LOCK_UNLOCKED;
*/
cred_t sys_cred_val, *sys_cred = &sys_cred_val;
/*
* The global quota manager. There is only one of these for the entire
* system, _not_ one per file system. XQM keeps track of the overall
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
struct xfs_qm *xfs_Gqm;
mutex_t xfs_Gqm_lock;
/* Export XFS symbols used by xfsidbg */
EXPORT_SYMBOL(xfs_Gqm);
EXPORT_SYMBOL(xfs_next_bit);
EXPORT_SYMBOL(xfs_contig_bits);
EXPORT_SYMBOL(xfs_bmbt_get_all);
......
......@@ -43,7 +43,4 @@ extern unsigned long xfs_physmem;
extern struct cred *sys_cred;
extern struct xfs_qm *xfs_Gqm;
extern mutex_t xfs_Gqm_lock;
#endif /* __XFS_GLOBALS_H__ */
......@@ -258,11 +258,10 @@ xfs_iomap_write_direct(
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED);
if (error)
return XFS_ERROR(error);
}
}
maps = min(XFS_WRITE_IMAPS, *nmaps);
nimaps = maps;
......@@ -291,7 +290,7 @@ xfs_iomap_write_direct(
* determine if reserving space on
* the data or realtime partition.
*/
if ((rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
if ((rt = XFS_IS_REALTIME_INODE(ip))) {
int sbrtextsize, iprtextsize;
sbrtextsize = mp->m_sb.sb_rextsize;
......@@ -333,12 +332,10 @@ xfs_iomap_write_direct(
goto error_out; /* Don't return in above if .. trans ..,
need lock to return */
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_blkquota(tp, ip, resblks)) {
if (XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, resblks)) {
error = (EDQUOT);
goto error1;
}
}
nimaps = 1;
bmapi_flag = XFS_BMAPI_WRITE;
......@@ -422,11 +419,9 @@ xfs_iomap_write_delay(
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
if (error)
return XFS_ERROR(error);
}
}
retry:
isize = ip->i_d.di_size;
......@@ -538,11 +533,8 @@ xfs_iomap_write_allocate(
* Make sure that the dquots are there.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0))) {
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return XFS_ERROR(error);
}
}
offset_fsb = map->br_startoff;
count_fsb = map->br_blockcount;
......
......@@ -121,7 +121,8 @@ xfs_read(
xfs_mount_t *mp;
vnode_t *vp;
unsigned long seg;
int direct = filp->f_flags & O_DIRECT;
int direct = (filp->f_flags & O_DIRECT);
int invisible = (filp->f_mode & FINVIS);
ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
......@@ -180,13 +181,12 @@ xfs_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(filp->f_mode & FINVIS)) {
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) && !invisible) {
int error;
vrwlock_t locktype = VRWLOCK_READ;
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp, *offp,
size, FILP_DELAY_FLAG(filp), &locktype);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offp, size,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
......@@ -198,7 +198,7 @@ xfs_read(
XFS_STATS_ADD(xfsstats.xs_read_bytes, ret);
if (!(filp->f_mode & FINVIS))
if (!invisible)
xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
return ret;
......@@ -217,11 +217,13 @@ xfs_sendfile(
ssize_t ret;
xfs_fsize_t n;
xfs_inode_t *ip;
xfs_mount_t *mp;
vnode_t *vp;
int invisible = (filp->f_mode & FINVIS);
ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
mp = ip->i_mount;
vn_trace_entry(vp, "xfs_sendfile", (inst_t *)__return_address);
XFS_STATS_INC(xfsstats.xs_read_calls);
......@@ -241,8 +243,8 @@ xfs_sendfile(
vrwlock_t locktype = VRWLOCK_READ;
int error;
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp, *offp,
count, FILP_DELAY_FLAG(filp), &locktype);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offp, count,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
......@@ -493,7 +495,8 @@ xfs_write(
vnode_t *vp;
unsigned long seg;
int iolock;
int direct = file->f_flags & O_DIRECT;
int direct = (file->f_flags & O_DIRECT);
int invisible = (file->f_mode & FINVIS);
int eventsent = 0;
vrwlock_t locktype;
......@@ -573,11 +576,11 @@ xfs_write(
}
if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
!(file->f_mode & FINVIS) && !eventsent)) {
!invisible && !eventsent)) {
loff_t savedsize = *offset;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
error = xfs_dm_send_data_event(DM_EVENT_WRITE, bdp,
error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, bdp,
*offset, size,
FILP_DELAY_FLAG(file), &locktype);
if (error) {
......@@ -588,12 +591,11 @@ xfs_write(
eventsent = 1;
/*
* The iolock was dropped and reaquired in
* xfs_dm_send_data_event so we have to recheck the size
* when appending. We will only "goto start;" once,
* since having sent the event prevents another call
* to xfs_dm_send_data_event, which is what
* allows the size to change in the first place.
* The iolock was dropped and reaquired in XFS_SEND_DATA
* so we have to recheck the size when appending.
* We will only "goto start;" once, since having sent the
* event prevents another call to XFS_SEND_DATA, which is
* what allows the size to change in the first place.
*/
if ((file->f_flags & O_APPEND) &&
savedsize != xip->i_d.di_size) {
......@@ -608,10 +610,8 @@ xfs_write(
*
* We must update xfs' times since revalidate will overcopy xfs.
*/
if (size) {
if (!(file->f_mode & FINVIS))
if (size && !invisible)
xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
}
/*
* If the offset is beyond the size of the file, we have a couple
......@@ -658,11 +658,10 @@ xfs_write(
ret = generic_file_write_nolock(file, iovp, segs, offset);
if ((ret == -ENOSPC) &&
DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
!(file->f_mode & FINVIS)) {
DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) && !invisible) {
xfs_rwunlock(bdp, locktype);
error = dm_send_namesp_event(DM_EVENT_NOSPACE, bdp,
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, bdp,
DM_RIGHT_NULL, bdp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
......
/*
* Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -61,7 +61,6 @@ xfs_read_xfsstats(
{ "xstrat", XFSSTAT_END_WRITE_CONVERT },
{ "rw", XFSSTAT_END_READ_WRITE_OPS },
{ "attr", XFSSTAT_END_ATTRIBUTE_OPS },
{ "qm", XFSSTAT_END_QUOTA_OPS },
{ "icluster", XFSSTAT_END_INODE_CLUSTER },
{ "vnodes", XFSSTAT_END_VNODE_OPS },
};
......@@ -95,50 +94,17 @@ xfs_read_xfsstats(
return len;
}
/*
 * /proc read handler reporting XQM state: the system-wide dquot
 * limit, the number of incore dquots, the free-to-inuse ratio, and
 * the length of the dquot freelist.  Follows the usual procfs
 * read_proc contract: formats into the caller's page and returns
 * the number of bytes available from the requested offset.
 */
STATIC int
xfs_read_xfsquota(
	char		*page,
	char		**start,
	off_t		off,
	int		count,
	int		*eof,
	void		*data)
{
	int		nbytes;

	/* maximum; incore; ratio free to inuse; freelist */
	nbytes = sprintf(page, "%d\t%d\t%d\t%u\n",
			ndquot,
			xfs_Gqm ? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
			xfs_Gqm ? xfs_Gqm->qm_dqfree_ratio : 0,
			xfs_Gqm ? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);

	/* Request starts at or past the end of the data: nothing left. */
	if (off >= nbytes) {
		*start = page;
		*eof = 1;
		return 0;
	}

	/* Hand back the remainder, clipped to the caller's buffer size. */
	*start = page + off;
	nbytes -= off;
	if (nbytes > count)
		return count;
	*eof = 1;
	return nbytes;
}
/*
 * Create the /proc/fs/xfs directory and the read-only entries
 * beneath it (stat and xqm).  Bails out silently if the directory
 * itself cannot be created, in which case no entries are added.
 */
void
xfs_init_procfs(void)
{
	if (proc_mkdir("fs/xfs", 0) == NULL)
		return;

	create_proc_read_entry("fs/xfs/stat", 0, 0, xfs_read_xfsstats, NULL);
	create_proc_read_entry("fs/xfs/xqm", 0, 0, xfs_read_xfsquota, NULL);
}
/*
 * Undo xfs_init_procfs(): remove the stat and xqm entries first,
 * then the fs/xfs directory itself (children before parent).
 */
void
xfs_cleanup_procfs(void)
{
	remove_proc_entry("fs/xfs/stat", NULL);
	remove_proc_entry("fs/xfs/xqm", NULL);
	remove_proc_entry("fs/xfs", NULL);
}
......@@ -107,16 +107,7 @@ struct xfsstats {
__uint32_t xs_attr_set;
__uint32_t xs_attr_remove;
__uint32_t xs_attr_list;
# define XFSSTAT_END_QUOTA_OPS (XFSSTAT_END_ATTRIBUTE_OPS+8)
__uint32_t xs_qm_dqreclaims;
__uint32_t xs_qm_dqreclaim_misses;
__uint32_t xs_qm_dquot_dups;
__uint32_t xs_qm_dqcachemisses;
__uint32_t xs_qm_dqcachehits;
__uint32_t xs_qm_dqwants;
__uint32_t xs_qm_dqshake_reclaims;
__uint32_t xs_qm_dqinact_reclaims;
# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_QUOTA_OPS+3)
# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3)
__uint32_t xs_iflush_count;
__uint32_t xs_icluster_flushcnt;
__uint32_t xs_icluster_flushinode;
......
......@@ -953,33 +953,27 @@ init_xfs_fs( void )
error = init_inodecache();
if (error < 0)
goto undo_inodecache;
error = pagebuf_init();
if (error < 0)
goto undo_pagebuf;
vn_init();
xfs_init();
error = vfs_initdmapi();
if (error < 0)
goto undo_dmapi;
error = vfs_initquota();
if (error < 0)
goto undo_quota;
vfs_initdmapi();
vfs_initquota();
error = register_filesystem(&xfs_fs_type);
if (error)
goto undo_fs;
goto undo_register;
return 0;
undo_fs:
vfs_exitquota();
undo_quota:
vfs_exitdmapi();
undo_dmapi:
undo_register:
pagebuf_terminate();
undo_pagebuf:
destroy_inodecache();
undo_inodecache:
return error;
}
......
......@@ -32,42 +32,38 @@
#ifndef __XFS_SUPER_H__
#define __XFS_SUPER_H__
#ifdef CONFIG_XFS_POSIX_ACL
# define XFS_ACL_STRING "ACLs, "
# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL)
#else
# define XFS_ACL_STRING
# define set_posix_acl_flag(sb) do { } while (0)
#endif
#ifdef CONFIG_XFS_DMAPI
# define XFS_DMAPI_STRING "DMAPI, "
# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops_xfs)
# define vfs_initdmapi() (0) /* temporarily */
# define vfs_exitdmapi() do { } while (0) /* temporarily */
# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops)
# define vfs_initdmapi() xfs_dm_init()
# define vfs_exitdmapi() xfs_dm_exit()
#else
# define XFS_DMAPI_STRING
# define vfs_insertdmapi(vfs) do { } while (0)
# define vfs_initdmapi() (0)
# define vfs_initdmapi() do { } while (0)
# define vfs_exitdmapi() do { } while (0)
#endif
#ifdef CONFIG_XFS_QUOTA
# define XFS_QUOTA_STRING "quota, "
# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops_xfs)
# define vfs_initquota() (0) /* temporarily */
# define vfs_exitquota() do { } while (0) /* temporarily */
# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops)
# define vfs_initquota() xfs_qm_init()
# define vfs_exitquota() xfs_qm_exit()
#else
# define XFS_QUOTA_STRING
# define vfs_insertquota(vfs) do { } while (0)
# define vfs_initquota() (0)
# define vfs_initquota() do { } while (0)
# define vfs_exitquota() do { } while (0)
#endif
#ifdef CONFIG_XFS_POSIX_ACL
# define XFS_ACL_STRING "ACLs, "
# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL)
#else
# define XFS_ACL_STRING
# define set_posix_acl_flag(sb) do { } while (0)
#endif
#ifdef CONFIG_XFS_RT
# define XFS_RT_STRING "realtime, "
# define XFS_REALTIME_STRING "realtime, "
#else
# define XFS_RT_STRING
# define XFS_REALTIME_STRING
#endif
#ifdef CONFIG_XFS_VNODE_TRACING
......@@ -82,9 +78,9 @@
# define XFS_DBG_STRING "no debug"
#endif
#define XFS_BUILD_OPTIONS XFS_ACL_STRING XFS_DMAPI_STRING \
XFS_RT_STRING \
XFS_QUOTA_STRING XFS_VNTRACE_STRING \
#define XFS_BUILD_OPTIONS XFS_ACL_STRING \
XFS_REALTIME_STRING \
XFS_VNTRACE_STRING \
XFS_DBG_STRING /* DBG must be last */
#define LINVFS_GET_VFS(s) \
......
......@@ -222,7 +222,7 @@ vfs_deallocate(
void
vfs_insertops(
struct vfs *vfsp,
struct vfsops *vfsops)
struct bhv_vfsops *vfsops)
{
struct bhv_desc *bdp;
......
......@@ -165,9 +165,19 @@ extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t);
extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int);
extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int);
typedef struct bhv_vfsops {
struct vfsops bhv_common;
void * bhv_custom;
} bhv_vfsops_t;
#define vfs_bhv_lookup(v, id) ( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) )
#define vfs_bhv_custom(b) ( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom )
#define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o))
#define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL )
extern vfs_t *vfs_allocate(void);
extern void vfs_deallocate(vfs_t *);
extern void vfs_insertops(vfs_t *, vfsops_t *);
extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
extern void bhv_insert_all_vfsops(struct vfs *);
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -31,7 +31,7 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
/*
......@@ -290,37 +290,45 @@ xfs_qm_dqwarn(
warned = 0;
if (INT_GET(d->d_blk_softlimit, ARCH_CONVERT) &&
(INT_GET(d->d_bcount, ARCH_CONVERT) >= INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
(INT_GET(d->d_bcount, ARCH_CONVERT) >=
INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
if (flags & XFS_QMOPT_DOWARN) {
INT_MOD(d->d_bwarns, ARCH_CONVERT, +1);
warned++;
}
} else {
if (INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) ||
(INT_GET(d->d_bcount, ARCH_CONVERT) < INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
(INT_GET(d->d_bcount, ARCH_CONVERT) <
INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
INT_ZERO(d->d_bwarns, ARCH_CONVERT);
}
}
if (INT_GET(d->d_ino_softlimit, ARCH_CONVERT) > 0 &&
(INT_GET(d->d_icount, ARCH_CONVERT) >= INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
(INT_GET(d->d_icount, ARCH_CONVERT) >=
INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
if (flags & XFS_QMOPT_DOWARN) {
INT_MOD(d->d_iwarns, ARCH_CONVERT, +1);
warned++;
}
} else {
if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT)) ||
(INT_GET(d->d_icount, ARCH_CONVERT) < INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
(INT_GET(d->d_icount, ARCH_CONVERT) <
INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
INT_ZERO(d->d_iwarns, ARCH_CONVERT);
}
}
#ifdef QUOTADEBUG
if (INT_GET(d->d_iwarns, ARCH_CONVERT))
printk("--------@@Inode warnings running : %Lu >= %Lu\n",
INT_GET(d->d_icount, ARCH_CONVERT), INT_GET(d->d_ino_softlimit, ARCH_CONVERT));
cmn_err(CE_DEBUG,
"--------@@Inode warnings running : %Lu >= %Lu",
INT_GET(d->d_icount, ARCH_CONVERT),
INT_GET(d->d_ino_softlimit, ARCH_CONVERT));
if (INT_GET(d->d_bwarns, ARCH_CONVERT))
printk("--------@@Blks warnings running : %Lu >= %Lu\n",
INT_GET(d->d_bcount, ARCH_CONVERT), INT_GET(d->d_blk_softlimit, ARCH_CONVERT));
cmn_err(CE_DEBUG,
"--------@@Blks warnings running : %Lu >= %Lu",
INT_GET(d->d_bcount, ARCH_CONVERT),
INT_GET(d->d_blk_softlimit, ARCH_CONVERT));
#endif
return (warned);
}
......@@ -869,7 +877,7 @@ xfs_qm_dqget(
if (xfs_do_dqerror) {
if ((xfs_dqerror_dev == mp->m_dev) &&
(xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
printk("Returning error in dqget\n");
cmn_err(CE_DEBUG, "Returning error in dqget");
return (EIO);
}
}
......@@ -894,7 +902,7 @@ xfs_qm_dqget(
* The chain is kept locked during lookup.
*/
if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
XFS_STATS_INC(xfsstats.xs_qm_dqcachehits);
XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
/*
* The dquot was found, moved to the front of the chain,
* taken off the freelist if it was on it, and locked
......@@ -906,7 +914,7 @@ xfs_qm_dqget(
xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
return (0); /* success */
}
XFS_STATS_INC(xfsstats.xs_qm_dqcachemisses);
XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
/*
* Dquot cache miss. We don't want to keep the inode lock across
......@@ -1006,7 +1014,7 @@ xfs_qm_dqget(
xfs_qm_dqput(tmpdqp);
XFS_DQ_HASH_UNLOCK(h);
xfs_qm_dqdestroy(dqp);
XFS_STATS_INC(xfsstats.xs_qm_dquot_dups);
XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
goto again;
}
}
......@@ -1372,18 +1380,6 @@ xfs_dqlock2(
}
/*
 * Accessor returning a dquot's user/group id (converted from the
 * on-disk representation), so callers need not poke at the
 * internals of the dquot core structure directly.
 */
xfs_dqid_t
xfs_qm_dqid(
	xfs_dquot_t	*dqp)
{
	return INT_GET(dqp->q_core.d_id, ARCH_CONVERT);
}
/*
* Take a dquot out of the mount's dqlist as well as the hashlist.
* This is called via unmount as well as quotaoff, and the purge
......@@ -1483,145 +1479,41 @@ xfs_qm_dqpurge(
}
/*
 * Do some primitive error checking on an ondisk dquot data structure.
 * Not just for debugging, actually; this can be useful for detecting
 * data corruption, mainly due to disk failures.
 *
 * ddq   - the ondisk dquot to sanity-check.
 * id    - expected dquot id, or -1 to skip the id comparison.
 * type  - dquot type; used only when repairing.
 * flags - XFS_QMOPT_DOWARN to complain via cmn_err(),
 *         XFS_QMOPT_DQREPAIR to re-initialize a bad dquot in place.
 * str   - caller-supplied tag prefixed to any warning messages.
 *
 * Returns the number of errors found (0 if the dquot looks sane).
 */
/* ARGSUSED */
int
xfs_qm_dqcheck(
	xfs_disk_dquot_t *ddq,
	xfs_dqid_t	 id,
	uint		 type,	  /* used only when IO_dorepair is true */
	uint		 flags,
	char		 *str)
{
	int		errs;

	errs = 0;
	/* ASSERT(flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN)); */
	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks get used
	 *    for some user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
			str, id, INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC);
		errs++;
	}
	if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			/* BUGFIX: report the mismatching d_version, not d_magic */
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
			str, id, INT_GET(ddq->d_version, ARCH_CONVERT), XFS_DQUOT_VERSION);
		errs++;
	}
	if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER && INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
			str, id, INT_GET(ddq->d_flags, ARCH_CONVERT));
		errs++;
	}
	if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) {
		if (flags & XFS_QMOPT_DOWARN)
			/* BUGFIX: print the ddq pointer with %p, not 0x%x
			 * (0x%x truncates pointers on 64-bit) */
			cmn_err(CE_ALERT,
			"%s : ondisk-dquot %p, ID mismatch: "
			"0x%x expected, found id 0x%x",
			str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT));
		errs++;
	}
	if (! errs) {
		/*
		 * A dquot over its block soft limit must have its block
		 * timer running (except for the default/root dquot, id 0).
		 */
		if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) &&
		    INT_GET(ddq->d_bcount, ARCH_CONVERT) >= INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) {
			if (INT_ISZERO(ddq->d_btimer, ARCH_CONVERT) && !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (%p) "
					"BLK TIMER NOT STARTED",
					str, (int) INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
				errs++;
			}
		}
		/* Likewise for the inode soft limit / inode timer. */
		if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) &&
		    INT_GET(ddq->d_icount, ARCH_CONVERT) >= INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) {
			if (INT_ISZERO(ddq->d_itimer, ARCH_CONVERT) && !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (%p) "
					"INODE TIMER NOT STARTED",
					str, (int) INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
				errs++;
			}
		}
	}
	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
		return (errs);

	if (flags & XFS_QMOPT_DOWARN)
		cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);

	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	memset(ddq, 0, sizeof(xfs_dqblk_t));
	xfs_qm_dqinit_core(id, type, (xfs_dqblk_t *)ddq);
	return (errs);
}
#ifdef QUOTADEBUG
void
xfs_qm_dqprint(xfs_dquot_t *dqp)
{
printk( "-----------KERNEL DQUOT----------------\n");
printk( "---- dquot ID = %d\n", (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
printk( "---- type = %s\n", XFS_QM_ISUDQ(dqp) ? "USR" : "GRP");
printk( "---- fs = 0x%p\n", dqp->q_mount);
printk( "---- blkno = 0x%x\n", (int) dqp->q_blkno);
printk( "---- boffset = 0x%x\n", (int) dqp->q_bufoffset);
printk( "---- blkhlimit = %Lu (0x%x)\n",
cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
cmn_err(CE_DEBUG, "---- dquotID = %d",
(int)INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
cmn_err(CE_DEBUG, "---- type = %s",
XFS_QM_ISUDQ(dqp) ? "USR" : "GRP");
cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount);
cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno);
cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
cmn_err(CE_DEBUG, "---- blkhlimit = %Lu (0x%x)",
INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT),
(int) INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT));
printk( "---- blkslimit = %Lu (0x%x)\n",
cmn_err(CE_DEBUG, "---- blkslimit = %Lu (0x%x)",
INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT),
(int)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT));
printk( "---- inohlimit = %Lu (0x%x)\n",
cmn_err(CE_DEBUG, "---- inohlimit = %Lu (0x%x)",
INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT),
(int)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT));
printk( "---- inoslimit = %Lu (0x%x)\n",
cmn_err(CE_DEBUG, "---- inoslimit = %Lu (0x%x)",
INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT),
(int)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT));
printk( "---- bcount = %Lu (0x%x)\n",
cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
(int)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
printk( "---- icount = %Lu (0x%x)\n",
cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
(int)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
printk( "---- btimer = %d\n", (int)INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT));
printk( "---- itimer = %d\n", (int)INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT));
printk( "---------------------------\n");
cmn_err(CE_DEBUG, "---- btimer = %d",
(int)INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT));
cmn_err(CE_DEBUG, "---- itimer = %d",
(int)INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT));
cmn_err(CE_DEBUG, "---------------------------");
}
#endif
......
......@@ -146,6 +146,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
}
#endif
/*
* The following three routines simply manage the q_flock
* semaphore embedded in the dquot. This semaphore synchronizes
......@@ -197,7 +198,6 @@ extern void xfs_qm_dqprint(xfs_dquot_t *);
#define xfs_qm_dqprint(a)
#endif
extern xfs_dquot_t *xfs_qm_dqinit(xfs_mount_t *, xfs_dqid_t, uint);
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
extern int xfs_qm_dqpurge(xfs_dquot_t *, uint);
......@@ -208,5 +208,13 @@ extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *);
extern int xfs_qm_dqwarn(xfs_disk_dquot_t *, uint);
extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
xfs_dqid_t, uint, uint, xfs_dquot_t **);
extern void xfs_qm_dqput(xfs_dquot_t *);
extern void xfs_qm_dqrele(xfs_dquot_t *);
extern void xfs_dqlock(xfs_dquot_t *);
extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
extern void xfs_dqunlock(xfs_dquot_t *);
extern void xfs_dqunlock_nonotify(xfs_dquot_t *);
#endif /* __XFS_DQUOT_H__ */
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -31,7 +31,7 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
/*
......
/*
* Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -32,58 +32,22 @@
#ifndef __XFS_DQUOT_ITEM_H__
#define __XFS_DQUOT_ITEM_H__
/*
* These are the structures used to lay out dquots and quotaoff
* records on the log. Quite similar to those of inodes.
*/
/*
* log format struct for dquots.
* The first two fields must be the type and size fitting into
* 32 bits : log_recovery code assumes that.
*/
typedef struct xfs_dq_logformat {
__uint16_t qlf_type; /* dquot log item type */
__uint16_t qlf_size; /* size of this item */
xfs_dqid_t qlf_id; /* usr/grp id number : 32 bits */
__int64_t qlf_blkno; /* blkno of dquot buffer */
__int32_t qlf_len; /* len of dquot buffer */
__uint32_t qlf_boffset; /* off of dquot in buffer */
} xfs_dq_logformat_t;
/*
* log format struct for QUOTAOFF records.
* The first two fields must be the type and size fitting into
* 32 bits : log_recovery code assumes that.
* We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer
* to the first and ensures that the first logitem is taken out of the AIL
* only when the last one is securely committed.
*/
typedef struct xfs_qoff_logformat {
unsigned short qf_type; /* quotaoff log item type */
unsigned short qf_size; /* size of this item */
unsigned int qf_flags; /* USR and/or GRP */
char qf_pad[12]; /* padding for future */
} xfs_qoff_logformat_t;
#ifdef __KERNEL__
struct xfs_dquot;
struct xfs_trans;
struct xfs_mount;
struct xfs_qoff_logitem;
typedef struct xfs_dq_logitem {
xfs_log_item_t qli_item; /* common portion */
struct xfs_dquot *qli_dquot; /* dquot ptr */
xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
unsigned short qli_pushbuf_flag; /* one bit used in push_ail */
unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */
#ifdef DEBUG
uint64_t qli_push_owner;
#endif
xfs_dq_logformat_t qli_format; /* logged structure */
} xfs_dq_logitem_t;
typedef struct xfs_qoff_logitem {
xfs_log_item_t qql_item; /* common portion */
struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
......@@ -93,12 +57,10 @@ typedef struct xfs_qoff_logitem {
extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *);
extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
xfs_qoff_logitem_t *, uint);
struct xfs_qoff_logitem *, uint);
extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
xfs_qoff_logitem_t *, uint);
struct xfs_qoff_logitem *, uint);
extern void xfs_trans_log_quotaoff_item(struct xfs_trans *,
xfs_qoff_logitem_t *);
#endif /* __KERNEL__ */
struct xfs_qoff_logitem *);
#endif /* __XFS_DQUOT_ITEM_H__ */
......@@ -31,8 +31,17 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
/*
* The global quota manager. There is only one of these for the entire
* system, _not_ one per file system. XQM keeps track of the overall
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
mutex_t xfs_Gqm_lock;
struct xfs_qm *xfs_Gqm;
EXPORT_SYMBOL(xfs_Gqm); /* used by xfsidbg */
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
......@@ -51,24 +60,27 @@ extern mutex_t qcheck_lock;
#ifdef QUOTADEBUG
#define XQM_LIST_PRINT(l, NXT, title) \
{ \
xfs_dquot_t *dqp; int i = 0;\
printk("%s (#%d)\n", title, (int) (l)->qh_nelems); \
xfs_dquot_t *dqp; int i = 0; \
cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
printk("\t%d.\t\"%d (%s)\"\t bcnt = %d, icnt = %d refs = %d\n", \
cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \
"bcnt = %d, icnt = %d, refs = %d", \
++i, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \
DQFLAGTO_TYPESTR(dqp), \
(int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \
(int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \
(int) dqp->q_nrefs); } \
}
#else
#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
#endif
/*
* Initialize the XQM structure.
* Note that there is not one quota manager per file system.
*/
struct xfs_qm *
xfs_qm_init(void)
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
xfs_qm_t *xqm;
int hsize, i;
......@@ -83,11 +95,6 @@ xfs_qm_init(void)
XFS_QM_HASHSIZE_LOW : XFS_QM_HASHSIZE_HIGH;
xqm->qm_dqhashmask = hsize - 1;
/*
* XXXsup We could keep reference counts on usr and grp quotas
* inside XQM separately, and avoid having two hashtables even
* when only one 'type' is active in the system.
*/
xqm->qm_usr_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize *
sizeof(xfs_dqhash_t),
KM_SLEEP);
......@@ -135,7 +142,7 @@ xfs_qm_init(void)
#ifdef DEBUG
mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
#endif
return (xqm);
return xqm;
}
/*
......@@ -186,7 +193,7 @@ xfs_qm_hold_quotafs_ref(
XFS_QM_LOCK(xfs_Gqm);
if (xfs_Gqm == NULL) {
if ((xfs_Gqm = xfs_qm_init()) == NULL) {
if ((xfs_Gqm = xfs_Gqm_init()) == NULL) {
return (XFS_ERROR(EINVAL));
}
}
......@@ -295,6 +302,7 @@ void
xfs_qm_unmount_quotadestroy(
xfs_mount_t *mp)
{
if (mp->m_quotainfo)
xfs_qm_destroy_quotainfo(mp);
}
......@@ -416,23 +424,18 @@ xfs_qm_unmount_quotas(
xfs_mount_t *mp)
{
xfs_inode_t *uqp, *gqp;
int error;
error = 0;
int error = 0;
/*
* Release the dquots that root inode, et al might be holding,
* before we flush quotas and blow away the quotainfo structure.
*/
ASSERT(mp->m_rootip);
if (mp->m_rootip->i_udquot || mp->m_rootip->i_gdquot)
xfs_qm_dqdettach_inode(mp->m_rootip);
if (mp->m_rbmip &&
(mp->m_rbmip->i_udquot || mp->m_rbmip->i_gdquot))
xfs_qm_dqdettach_inode(mp->m_rbmip);
if (mp->m_rsumip &&
(mp->m_rsumip->i_udquot || mp->m_rsumip->i_gdquot))
xfs_qm_dqdettach_inode(mp->m_rsumip);
xfs_qm_dqdetach(mp->m_rootip);
if (mp->m_rbmip)
xfs_qm_dqdetach(mp->m_rbmip);
if (mp->m_rsumip)
xfs_qm_dqdetach(mp->m_rsumip);
/*
* Flush out the quota inodes.
......@@ -579,8 +582,8 @@ xfs_qm_detach_gdquots(
* parameter. This is used when turning off quota accounting for
* users and/or groups, as well as when the filesystem is unmounting.
*/
int
xfs_qm_dqpurge_all(
STATIC int
xfs_qm_dqpurge_int(
xfs_mount_t *mp,
uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/GQUOTA */
{
......@@ -652,7 +655,26 @@ xfs_qm_dqpurge_all(
dqp = nextdqp;
}
xfs_qm_mplist_unlock(mp);
return (nmisses);
return nmisses;
}
/*
 * Purge the mount's dquot cache.  Repeatedly calls
 * xfs_qm_dqpurge_int() until a pass reports that no busy dquots
 * were skipped, backing off between passes proportionally to the
 * number of misses.  A mount without a quotainfo structure has
 * nothing to purge.  Always returns 0.
 */
int
xfs_qm_dqpurge_all(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		nmisses;

	if (!mp->m_quotainfo)
		return 0;

	/* None of the dquots should really be busy at this point. */
	for (;;) {
		nmisses = xfs_qm_dqpurge_int(mp, flags);
		if (!nmisses)
			break;
		delay(nmisses * 10);
	}
	return 0;
}
STATIC int
......@@ -710,7 +732,6 @@ xfs_qm_dqattach_one(
xfs_dqunlock(dqp);
xfs_dqunlock(udqhint);
}
/* XXX XFS_STATS */
goto done;
}
/*
......@@ -874,40 +895,42 @@ xfs_qm_dqattach(
xfs_inode_t *ip,
uint flags)
{
int error;
xfs_mount_t *mp;
uint nquotas;
xfs_mount_t *mp = ip->i_mount;
uint nquotas = 0;
int error = 0;
mp = ip->i_mount;
ASSERT(ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino);
if ((! XFS_IS_QUOTA_ON(mp)) ||
(! XFS_NOT_DQATTACHED(mp, ip)) ||
(ip->i_ino == mp->m_sb.sb_uquotino) ||
(ip->i_ino == mp->m_sb.sb_gquotino))
return (0);
ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
XFS_ISLOCKED_INODE_EXCL(ip));
nquotas = 0;
error = 0;
if (! (flags & XFS_QMOPT_ILOCKED))
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (XFS_IS_UQUOTA_ON(mp)) {
if ((error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
flags & XFS_QMOPT_DQALLOC,
flags & XFS_QMOPT_DQLOCK,
NULL, &ip->i_udquot)))
NULL, &ip->i_udquot);
if (error)
goto done;
nquotas++;
}
ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
if (XFS_IS_GQUOTA_ON(mp)) {
if ((error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
flags & XFS_QMOPT_DQALLOC,
flags & XFS_QMOPT_DQLOCK,
ip->i_udquot, &ip->i_gdquot)))
ip->i_udquot, &ip->i_gdquot);
/*
* Don't worry about the udquot that we may have
* attached above. It'll get dettached, if not already.
* attached above. It'll get detached, if not already.
*/
if (error)
goto done;
nquotas++;
}
......@@ -975,9 +998,12 @@ xfs_qm_dqattach(
* xfs_ireclaim.
*/
void
xfs_qm_dqdettach_inode(
xfs_qm_dqdetach(
xfs_inode_t *ip)
{
if (!(ip->i_udquot || ip->i_gdquot))
return;
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
if (ip->i_udquot)
......@@ -992,33 +1018,8 @@ xfs_qm_dqdettach_inode(
}
}
int
xfs_qm_unmount(
xfs_mount_t *mp)
{
vnode_t *vp;
if (XFS_IS_UQUOTA_ON(mp)) {
vp = XFS_ITOV(XFS_QI_UQIP(mp));
VN_RELE(vp);
if (vn_count(vp) > 1)
cmn_err(CE_WARN, "UQUOTA busy vp=0x%x count=%d",
vp, vn_count(vp));
}
if (XFS_IS_GQUOTA_ON(mp)) {
vp = XFS_ITOV(XFS_QI_GQIP(mp));
VN_RELE(vp);
if (vn_count(vp) > 1)
cmn_err(CE_WARN, "GQUOTA busy vp=0x%x count=%d",
vp, vn_count(vp));
}
return (0);
}
/*
* This is called by xfs_sync and flags arg determines the caller,
* This is called by VFS_SYNC and flags arg determines the caller,
* and its motives, as done in xfs_sync.
*
* vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
......@@ -1923,9 +1924,7 @@ xfs_qm_quotacheck(
mp->m_qflags &= ~(XFS_GQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags |= flags;
#ifdef QUOTADEBUG
XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
#endif
error_return:
cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
......@@ -2033,7 +2032,7 @@ xfs_qm_shake_freelist(
nflushes = 0;
#ifdef QUOTADEBUG
printk("Shake free 0x%x\n", howmany);
cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
#endif
/* lock order is : hashchainlock, freelistlock, mplistlock */
tryagain:
......@@ -2053,7 +2052,7 @@ xfs_qm_shake_freelist(
xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return (nreclaimed != howmany);
XFS_STATS_INC(xfsstats.xs_qm_dqwants);
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
goto tryagain;
}
......@@ -2067,7 +2066,7 @@ xfs_qm_shake_freelist(
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
ASSERT(dqp->HL_PREVP == NULL);
ASSERT(dqp->MPL_PREVP == NULL);
XFS_STATS_INC(xfsstats.xs_qm_dqinact_reclaims);
XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
nextdqp = dqp->dq_flnext;
goto off_freelist;
}
......@@ -2132,7 +2131,8 @@ xfs_qm_shake_freelist(
}
xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
#ifdef QUOTADEBUG
printk("Shake 0x%p, ID 0x%x\n", dqp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
dqp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
#endif
ASSERT(dqp->q_nrefs == 0);
nextdqp = dqp->dq_flnext;
......@@ -2146,7 +2146,7 @@ xfs_qm_shake_freelist(
XQM_FREELIST_REMOVE(dqp);
xfs_dqunlock(dqp);
nreclaimed++;
XFS_STATS_INC(xfsstats.xs_qm_dqshake_reclaims);
XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
xfs_qm_dqdestroy(dqp);
dqp = nextdqp;
}
......@@ -2220,7 +2220,7 @@ xfs_qm_dqreclaim_one(void)
xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
return (NULL);
XFS_STATS_INC(xfsstats.xs_qm_dqwants);
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
goto startagain;
}
......@@ -2237,7 +2237,7 @@ xfs_qm_dqreclaim_one(void)
XQM_FREELIST_REMOVE(dqp);
xfs_dqunlock(dqp);
dqpout = dqp;
XFS_STATS_INC(xfsstats.xs_qm_dqinact_reclaims);
XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
break;
}
......@@ -2323,7 +2323,7 @@ xfs_qm_dqalloc_incore(
* Try to recycle a dquot from the freelist.
*/
if ((dqp = xfs_qm_dqreclaim_one())) {
XFS_STATS_INC(xfsstats.xs_qm_dqreclaims);
XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
/*
* Just zero the core here. The rest will get
* reinitialized by caller. XXX we shouldn't even
......@@ -2333,7 +2333,7 @@ xfs_qm_dqalloc_incore(
*O_dqpp = dqp;
return (B_FALSE);
}
XFS_STATS_INC(xfsstats.xs_qm_dqreclaim_misses);
XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
}
/*
......@@ -2361,9 +2361,7 @@ xfs_qm_write_sb_changes(
int error;
#ifdef QUOTADEBUG
cmn_err(CE_NOTE,
"Writing superblock quota changes :%s",
mp->m_fsname);
cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if ((error = xfs_trans_reserve(tp, 0,
......@@ -2408,6 +2406,9 @@ xfs_qm_vop_dqalloc(
xfs_dquot_t *uq, *gq;
uint lockflags;
if (!XFS_IS_QUOTA_ON(mp))
return 0;
lockflags = XFS_ILOCK_EXCL;
xfs_ilock(ip, lockflags);
......@@ -2564,7 +2565,7 @@ xfs_qm_vop_chown_reserve(
xfs_inode_t *ip,
xfs_dquot_t *udqp,
xfs_dquot_t *gdqp,
uint privileged)
uint flags)
{
int error;
xfs_mount_t *mp;
......@@ -2600,13 +2601,11 @@ xfs_qm_vop_chown_reserve(
}
}
if ((error = xfs_trans_reserve_quota(tp, delblksudq,
delblksgdq,
ip->i_d.di_nblocks, 1,
privileged)))
if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
flags | XFS_QMOPT_RES_REGBLKS)))
return (error);
/*
* Do the delayed blks reservations/unreservations now. Since, these
* are done without the help of a transaction, if a reservation fails
......@@ -2619,15 +2618,13 @@ xfs_qm_vop_chown_reserve(
*/
ASSERT(delblksudq || delblksgdq);
ASSERT(unresudq || unresgdq);
if ((error = xfs_trans_reserve_quota(NULL,
delblksudq, delblksgdq,
(xfs_qcnt_t)delblks, 0,
privileged)))
if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
flags | XFS_QMOPT_RES_REGBLKS)))
return (error);
(void) xfs_trans_unreserve_quota(NULL,
unresudq, unresgdq,
(xfs_qcnt_t)delblks, 0,
0);
xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
XFS_QMOPT_RES_REGBLKS);
}
return (0);
......@@ -2643,6 +2640,9 @@ xfs_qm_vop_rename_dqattach(
ip = i_tab[0];
if (! XFS_IS_QUOTA_ON(ip->i_mount))
return (0);
if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
error = xfs_qm_dqattach(ip, 0);
if (error)
......@@ -2670,6 +2670,9 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_dquot_t *udqp,
xfs_dquot_t *gdqp)
{
if (!XFS_IS_QUOTA_ON(tp->t_mountp))
return;
ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
......@@ -2714,7 +2717,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
xfs_dqlock(dqp);
nextdqp = dqp->dq_flnext;
#ifdef QUOTADEBUG
printk("FREELIST destroy 0x%p\n", dqp);
cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
#endif
XQM_FREELIST_REMOVE(dqp);
xfs_dqunlock(dqp);
......@@ -2753,25 +2756,6 @@ xfs_qm_freelist_unlink(xfs_dquot_t *dq)
xfs_Gqm->qm_dqfreelist.qh_version++;
}
#ifdef QUOTADEBUG
void
xfs_qm_freelist_print(xfs_frlist_t *qlist, char *title)
{
xfs_dquot_t *dq;
int i = 0;
printk("%s (#%d)\n", title, (int) qlist->qh_nelems);
FOREACH_DQUOT_IN_FREELIST(dq, qlist) {
printk("\t%d.\t\"%d (%s:0x%p)\"\t bcnt = %d, icnt = %d "
"refs = %d\n",
++i, INT_GET(dq->q_core.d_id, ARCH_CONVERT),
DQFLAGTO_TYPESTR(dq), dq,
(int) INT_GET(dq->q_core.d_bcount, ARCH_CONVERT),
(int) INT_GET(dq->q_core.d_icount, ARCH_CONVERT),
(int) dq->q_nrefs);
}
}
#endif
void
xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
{
......
......@@ -32,10 +32,16 @@
#ifndef __XFS_QM_H__
#define __XFS_QM_H__
struct xfs_dqhash;
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_quota_priv.h"
#include "xfs_qm_stats.h"
struct xfs_qm;
struct xfs_inode;
struct xfs_dquot;
extern mutex_t xfs_Gqm_lock;
extern struct xfs_qm *xfs_Gqm;
extern kmem_zone_t *qm_dqzone;
extern kmem_zone_t *qm_dqtrxzone;
......@@ -136,24 +142,13 @@ typedef struct xfs_quotainfo {
} xfs_quotainfo_t;
/*
* The structure kept inside the xfs_trans_t keep track of dquot changes
* within a transaction and apply them later.
*/
typedef struct xfs_dqtrx {
struct xfs_dquot *qt_dquot; /* the dquot this refers to */
ulong qt_blk_res; /* blks reserved on a dquot */
ulong qt_blk_res_used; /* blks used from the reservation */
ulong qt_ino_res; /* inode reserved on a dquot */
ulong qt_ino_res_used; /* inodes used from the reservation */
long qt_bcount_delta; /* dquot blk count changes */
long qt_delbcnt_delta; /* delayed dquot blk count changes */
long qt_icount_delta; /* dquot inode count changes */
ulong qt_rtblk_res; /* # blks reserved on a dquot */
ulong qt_rtblk_res_used;/* # blks used from reservation */
long qt_rtbcount_delta;/* dquot realtime blk changes */
long qt_delrtb_delta; /* delayed RT blk count changes */
} xfs_dqtrx_t;
extern xfs_dqtrxops_t xfs_trans_dquot_ops;
extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *);
extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *);
/*
* We keep the usr and grp dquots separately so that locking will be easier
......@@ -184,9 +179,33 @@ typedef struct xfs_dquot_acct {
extern int xfs_qm_init_quotainfo(xfs_mount_t *);
extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
extern int xfs_qm_mount_quotas(xfs_mount_t *);
extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint);
extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
extern int xfs_qm_unmount_quotas(xfs_mount_t *);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
extern int xfs_qm_sync(xfs_mount_t *, short);
/* dquot stuff */
extern void xfs_qm_dqunlink(xfs_dquot_t *);
extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
extern int xfs_qm_dqattach(xfs_inode_t *, uint);
extern void xfs_qm_dqdetach(xfs_inode_t *);
extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
/* vop stuff */
extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
uid_t, gid_t, uint,
xfs_dquot_t **, xfs_dquot_t **);
extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *);
extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **);
extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t **, xfs_dquot_t *);
extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *, uint);
/* list stuff */
extern void xfs_qm_freelist_init(xfs_frlist_t *);
......@@ -207,10 +226,4 @@ extern int xfs_qm_internalqcheck(xfs_mount_t *);
#define xfs_qm_internalqcheck(mp) (0)
#endif
#ifdef QUOTADEBUG
extern void xfs_qm_freelist_print(xfs_frlist_t *, char *);
#else
#define xfs_qm_freelist_print(a, b) do { } while (0)
#endif
#endif /* __XFS_QM_H__ */
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#include <linux/init.h>
#include "xfs_qm.h"
/*
 * Quota-related mount option strings recognized by xfs_qm_parseargs().
 * The *NOENF variants turn quota accounting on but leave limits unenforced.
 */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota accounting, no enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota accounting, no enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce"	/* same as uqnoenforce */
/*
 * Parse the quota mount options out of the option string, setting the
 * corresponding XFSMNT_* flags in @args and consuming the options we
 * recognize so the layers below never see them.
 *
 * strsep() splits @options in place (replacing each ',' with '\0').
 * Options we handle are overwritten with ',' characters, which the rest
 * of the option parser skips; options we don't recognize get their
 * separator restored and are left intact for the next behavior layer.
 *
 * If no quota option was referenced (and this isn't a remount, where
 * @update is nonzero), the quota behavior is removed from the vfs chain.
 * Returns 0 or an error from the next behavior's parseargs.
 */
STATIC int
xfs_qm_parseargs(
	struct bhv_desc		*bhv,
	char			*options,
	struct xfs_mount_args	*args,
	int			update)
{
	size_t			length;
	char			*local_options = options;
	char			*this_char;
	int			error;
	int			referenced = update;

	while ((this_char = strsep(&local_options, ",")) != NULL) {
		length = strlen(this_char);
		/* account for the ',' strsep just consumed, if any */
		if (local_options)
			length++;

		if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
			args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
			/* noquota alone doesn't keep the QM behavior around */
			referenced = update;
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
				!strcmp(this_char, MNTOPT_UQUOTA) ||
				!strcmp(this_char, MNTOPT_USRQUOTA)) {
			args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
			referenced = 1;
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
				!strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			args->flags |= XFSMNT_UQUOTA;
			args->flags &= ~XFSMNT_UQUOTAENF;
			referenced = 1;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
				!strcmp(this_char, MNTOPT_GRPQUOTA)) {
			args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
			referenced = 1;
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			args->flags |= XFSMNT_GQUOTA;
			args->flags &= ~XFSMNT_GQUOTAENF;
			referenced = 1;
		} else {
			/* not ours: restore the separator and leave it alone */
			if (local_options)
				*(local_options-1) = ',';
			continue;
		}

		/* blank out the consumed option so lower layers skip it */
		while (length--)
			*this_char++ = ',';
	}

	PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
	if (!error && !referenced)
		bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
	return error;
}
/*
 * Emit the active quota mount options into the /proc/mounts seq_file,
 * then delegate to the next behavior in the chain.
 */
STATIC int
xfs_qm_showargs(
	struct bhv_desc		*bhv,
	struct seq_file		*m)
{
	struct vfs		*vfsp = bhvtovfs(bhv);
	struct xfs_mount	*mp = XFS_VFSTOM(vfsp);
	int			rval;

	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
		if (mp->m_qflags & XFS_UQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_USRQUOTA);
		else
			seq_puts(m, "," MNTOPT_UQUOTANOENF);
	}

	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT)))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	PVFS_SHOWARGS(BHV_NEXT(bhv), m, rval);
	return rval;
}
/*
 * Mount-time hook for the quota behavior: initialize the in-core quota
 * bookkeeping when any quota mount option was requested, then let the
 * next behavior layer perform the actual mount.
 */
STATIC int
xfs_qm_mount(
	struct bhv_desc		*bhv,
	struct xfs_mount_args	*args,
	struct cred		*cr)
{
	struct vfs		*vfsp = bhvtovfs(bhv);
	struct xfs_mount	*mp = XFS_VFSTOM(vfsp);
	int			rval;

	/* any quota flavor requested? */
	if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA))
		xfs_qm_mount_quotainit(mp, args->flags);

	PVFS_MOUNT(BHV_NEXT(bhv), args, cr, rval);
	return rval;
}
/*
 * Sync hook for the quota behavior: flush dirty dquots before passing
 * the sync request down the behavior chain.  A sync failure only aborts
 * early when the filesystem is already shutting down.
 */
STATIC int
xfs_qm_syncall(
	struct bhv_desc		*bhv,
	int			flags,
	cred_t			*credp)
{
	struct vfs		*vfsp = bhvtovfs(bhv);
	struct xfs_mount	*mp = XFS_VFSTOM(vfsp);
	int			rval;

	/* Ask the Quota Manager to flush the dquots first. */
	if (XFS_IS_QUOTA_ON(mp)) {
		rval = xfs_qm_sync(mp, flags);
		if (rval) {
			/*
			 * An I/O error implies an imminent shutdown; in
			 * that case there is nothing more to do here.
			 */
			ASSERT(rval != EIO || XFS_FORCED_SHUTDOWN(mp));
			if (XFS_FORCED_SHUTDOWN(mp))
				return XFS_ERROR(rval);
		}
	}

	PVFS_SYNC(BHV_NEXT(bhv), flags, credp, rval);
	return rval;
}
/*
* When xfsquotas isn't installed and the superblock had quotas, we need to
* clear the quotaflags from superblock.
*/
/*
 * When xfsquotas isn't installed and the superblock had quotas, we need to
 * clear the quotaflags from superblock.
 *
 * Clears both the in-core (m_qflags) and on-disk (sb_qflags) quota flags,
 * logging the superblock change in its own transaction unless the fs is
 * read-only (in which case only the incore copy is reset).
 */
STATIC void
xfs_mount_reset_sbqflags(
	xfs_mount_t		*mp)
{
	xfs_trans_t		*tp;
	unsigned long		s;

	mp->m_qflags = 0;
	/*
	 * It is OK to look at sb_qflags here in mount path,
	 * without SB_LOCK.
	 */
	if (mp->m_sb.sb_qflags == 0)
		return;
	s = XFS_SB_LOCK(mp);
	mp->m_sb.sb_qflags = 0;
	XFS_SB_UNLOCK(mp, s);

	/*
	 * if the fs is readonly, let the incore superblock run
	 * with quotas off but don't flush the update out to disk
	 */
	if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
		return;
#ifdef QUOTADEBUG
	xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
#endif
	/* log the cleared quota flags to disk in a small transaction */
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				XFS_DEFAULT_LOG_COUNT)) {
		xfs_trans_cancel(tp, 0);
		return;
	}
	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	xfs_trans_commit(tp, 0, NULL);
}
/*
 * Quota-manager hook called during mount (via xfs_qmcore_xfs.xfs_qminit).
 *
 * Reconciles the quota state requested on the mount command line with
 * what the on-disk superblock says.  On success, *needquotamount tells
 * the caller whether quota mounting must be deferred until after log
 * recovery (saving the flags in *quotaflags); returns EPERM when a
 * quota state change is requested on a read-only device, 0 otherwise.
 */
STATIC int
xfs_qm_newmount(
	xfs_mount_t	*mp,
	uint		*needquotamount,
	uint		*quotaflags)
{
	uint		quotaondisk;
	uint		uquotaondisk = 0, gquotaondisk = 0;

	*quotaflags = 0;
	*needquotamount = B_FALSE;

	/* does the superblock say quota accounting was active? */
	quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
		mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT);

	if (quotaondisk) {
		uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
		gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
	}

	/*
	 * If the device itself is read-only, we can't allow
	 * the user to change the state of quota on the mount -
	 * this would generate a transaction on the ro device,
	 * which would lead to an I/O error and shutdown
	 */

	if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
	    (!uquotaondisk &&  XFS_IS_UQUOTA_ON(mp)) ||
	     (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
	    (!gquotaondisk &&  XFS_IS_GQUOTA_ON(mp)))  &&
	    xfs_dev_is_read_only(mp, "changing quota state")) {
		cmn_err(CE_WARN,
			"XFS: please mount with%s%s%s.",
			(!quotaondisk ? "out quota" : ""),
			(uquotaondisk ? " usrquota" : ""),
			(gquotaondisk ? " grpquota" : ""));
		return XFS_ERROR(EPERM);
	}

	if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
		/*
		 * Call mount_quotas at this point only if we won't have to do
		 * a quotacheck.
		 */
		if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
			/*
			 * If the xfs quota code isn't installed,
			 * we have to reset the quotachk'd bit.
			 * If an error occurred, qm_mount_quotas code
			 * has already disabled quotas. So, just finish
			 * mounting, and get on with the boring life
			 * without disk quotas.
			 */
			if (xfs_qm_mount_quotas(mp))
				xfs_mount_reset_sbqflags(mp);
		} else {
			/*
			 * Clear the quota flags, but remember them. This
			 * is so that the quota code doesn't get invoked
			 * before we're ready. This can happen when an
			 * inode goes inactive and wants to free blocks,
			 * or via xfs_log_mount_finish.
			 */
			*needquotamount = B_TRUE;
			*quotaflags = mp->m_qflags;
			mp->m_qflags = 0;
		}
	}

	return 0;
}
/*
 * Quota-manager hook called at the end of mount (via
 * xfs_qmcore_xfs.xfs_qmmount): performs the deferred quota mount that
 * xfs_qm_newmount() requested, restoring the saved quota flags first.
 * Always returns 0.
 */
STATIC int
xfs_qm_endmount(
	xfs_mount_t	*mp,
	uint		needquotamount,
	uint		quotaflags)
{
	if (needquotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;
		/* on failure, fall back to running without disk quotas */
		if (xfs_qm_mount_quotas(mp))
			xfs_mount_reset_sbqflags(mp);
	}

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
	if (! (XFS_IS_QUOTA_ON(mp)))
		xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
	else
		xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
#endif

#ifdef QUOTADEBUG
	/* sanity-check incore vs on-disk quota usage in debug builds */
	if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
		cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
#endif

	return 0;
}
/*
 * NULL-tolerant wrapper around xfs_qm_dqrele() for the qmops table:
 * core XFS calls this without checking for a NULL dquot first.
 */
STATIC void
xfs_qm_dqrele_null(
	xfs_dquot_t	*dq)
{
	if (dq != NULL)
		xfs_qm_dqrele(dq);
}
/*
 * Quota-manager operations vector handed to core XFS; core code invokes
 * quota functionality only through these hooks (XFS_QM_* macros), so the
 * quota code can live in its own module/subdirectory.
 */
struct xfs_qmops xfs_qmcore_xfs = {
	.xfs_qminit		= xfs_qm_newmount,
	.xfs_qmdone		= xfs_qm_unmount_quotadestroy,
	.xfs_qmmount		= xfs_qm_endmount,
	.xfs_qmunmount		= xfs_qm_unmount_quotas,
	.xfs_dqrele		= xfs_qm_dqrele_null,
	.xfs_dqattach		= xfs_qm_dqattach,
	.xfs_dqdetach		= xfs_qm_dqdetach,
	.xfs_dqpurgeall		= xfs_qm_dqpurge_all,
	.xfs_dqvopalloc		= xfs_qm_vop_dqalloc,
	.xfs_dqvopcreate	= xfs_qm_vop_dqattach_and_dqmod_newinode,
	.xfs_dqvoprename	= xfs_qm_vop_rename_dqattach,
	.xfs_dqvopchown		= xfs_qm_vop_chown,
	.xfs_dqvopchownresv	= xfs_qm_vop_chown_reserve,
	.xfs_dqtrxops		= &xfs_trans_dquot_ops,
};
/*
 * VFS behavior descriptor for the quota layer: inserts the quota hooks
 * at the VFS_POSITION_QM slot in the per-mount behavior chain.
 */
struct bhv_vfsops xfs_qmops = { {
	BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM),
	.vfs_parseargs		= xfs_qm_parseargs,
	.vfs_showargs		= xfs_qm_showargs,
	.vfs_mount		= xfs_qm_mount,
	.vfs_sync		= xfs_qm_syncall,
	.vfs_quotactl		= xfs_qm_quotactl, },
};
/*
 * Module/boot-time initialization of the quota subsystem: announces
 * itself, sets up the global QM lock, registers the quota behavior's
 * custom ops vector, and creates the /proc statistics entries.
 */
void __init
xfs_qm_init(void)
{
	static char	message[] __initdata =
		KERN_INFO "SGI XFS Quota Management subsystem\n";

	printk(message);
	mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
	vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs);
	xfs_qm_init_procfs();
}
/*
 * Module unload teardown: unregister the behavior ops, remove the /proc
 * entries, and destroy the dquot slab caches if they were created.
 */
void __exit
xfs_qm_exit(void)
{
	vfs_bhv_clr_custom(&xfs_qmops);
	xfs_qm_cleanup_procfs();
	if (qm_dqzone)
		kmem_cache_destroy(qm_dqzone);
	if (qm_dqtrxzone)
		kmem_cache_destroy(qm_dqtrxzone);
}
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#include <linux/proc_fs.h>
#include "xfs_qm.h"
struct xqmstats xqmstats;
/*
 * /proc/fs/xfs/xqm read handler (legacy proc_read protocol): formats
 * "maximum incore free-ratio freelist" dquot counters into @buffer and
 * implements the standard start/offset/count/eof bookkeeping so partial
 * reads work.  All xfs_Gqm-derived values report 0 before the quota
 * manager is initialized.
 */
STATIC int
xfs_qm_read_xfsquota(
	char		*buffer,
	char		**start,
	off_t		offset,
	int		count,
	int		*eof,
	void		*data)
{
	int		len;

	/* maximum; incore; ratio free to inuse; freelist */
	len = sprintf(buffer, "%d\t%d\t%d\t%u\n",
			ndquot,
			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
			xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
			xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);

	/* reader has consumed everything */
	if (offset >= len) {
		*start = buffer;
		*eof = 1;
		return 0;
	}
	*start = buffer + offset;
	/* clamp to the caller's buffer; eof only when we return the tail */
	if ((len -= offset) > count)
		return count;
	*eof = 1;

	return len;
}
/*
 * /proc/fs/xfs/xqmstat read handler (legacy proc_read protocol):
 * formats the eight XQM performance counters on one line and handles
 * the usual start/offset/count/eof partial-read bookkeeping.
 */
STATIC int
xfs_qm_read_stats(
	char		*buffer,
	char		**start,
	off_t		offset,
	int		count,
	int		*eof,
	void		*data)
{
	int		nbytes;

	/* quota performance statistics */
	nbytes = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n",
			xqmstats.xs_qm_dqreclaims,
			xqmstats.xs_qm_dqreclaim_misses,
			xqmstats.xs_qm_dquot_dups,
			xqmstats.xs_qm_dqcachemisses,
			xqmstats.xs_qm_dqcachehits,
			xqmstats.xs_qm_dqwants,
			xqmstats.xs_qm_dqshake_reclaims,
			xqmstats.xs_qm_dqinact_reclaims);

	if (offset >= nbytes) {
		/* nothing left for this reader */
		*start = buffer;
		*eof = 1;
		return 0;
	}

	*start = buffer + offset;
	nbytes -= offset;
	if (nbytes > count)
		return count;		/* more to come; eof stays clear */

	*eof = 1;
	return nbytes;
}
/*
 * Register the two read-only XQM /proc entries (stats and quota info).
 */
void
xfs_qm_init_procfs(void)
{
	create_proc_read_entry("fs/xfs/xqmstat", 0, 0, xfs_qm_read_stats, NULL);
	create_proc_read_entry("fs/xfs/xqm", 0, 0, xfs_qm_read_xfsquota, NULL);
}
/*
 * Remove the XQM /proc entries registered by xfs_qm_init_procfs().
 */
void
xfs_qm_cleanup_procfs(void)
{
	remove_proc_entry("fs/xfs/xqm", NULL);
	remove_proc_entry("fs/xfs/xqmstat", NULL);
}
/*
* Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#ifndef __XFS_QM_STATS_H__
#define __XFS_QM_STATS_H__

#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)

/*
 * XQM global statistics
 */
struct xqmstats {
	__uint32_t		xs_qm_dqreclaims;	/* dquots recycled off the freelist */
	__uint32_t		xs_qm_dqreclaim_misses;	/* failed freelist reclaim attempts */
	__uint32_t		xs_qm_dquot_dups;	/* duplicate dquots found on lookup */
	__uint32_t		xs_qm_dqcachemisses;	/* dquot cache lookup misses */
	__uint32_t		xs_qm_dqcachehits;	/* dquot cache lookup hits */
	__uint32_t		xs_qm_dqwants;		/* waits for a wanted dquot */
	__uint32_t		xs_qm_dqshake_reclaims;	/* dquots freed by the shaker */
	__uint32_t		xs_qm_dqinact_reclaims;	/* inactive dquots reclaimed */
};

extern struct xqmstats xqmstats;

/* counters are plain (unlocked) increments; see callers in quota code */
# define XQM_STATS_INC(count)	( (count)++ )

extern void xfs_qm_init_procfs(void);
extern void xfs_qm_cleanup_procfs(void);

#else

/* stats compiled out: counting and /proc setup become no-ops */
# define XQM_STATS_INC(count)	do { } while (0)

static __inline void xfs_qm_init_procfs(void) { };
static __inline void xfs_qm_cleanup_procfs(void) { };

#endif

#endif	/* __XFS_QM_STATS_H__ */
......@@ -31,7 +31,7 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
#ifdef DEBUG
# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -67,8 +67,8 @@
#define XQMLCK(h) (mutex_lock(&((h)->qh_lock), PINOD))
#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
#ifdef DEBUG
static inline int
XQMISLCKD(xfs_dqhash_t *h)
struct xfs_dqhash;
static inline int XQMISLCKD(struct xfs_dqhash *h)
{
if (mutex_trylock(&h->qh_lock)) {
mutex_unlock(&h->qh_lock);
......
......@@ -31,8 +31,9 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
* Add the locked dquot to the transaction.
......@@ -95,7 +96,7 @@ xfs_trans_log_dquot(
* Carry forward whatever is left of the quota blk reservation to
* the spanky new transaction
*/
void
STATIC void
xfs_trans_dup_dqinfo(
xfs_trans_t *otp,
xfs_trans_t *ntp)
......@@ -104,6 +105,9 @@ xfs_trans_dup_dqinfo(
int i,j;
xfs_dqtrx_t *oqa, *nqa;
if (!otp->t_dqinfo)
return;
xfs_trans_alloc_dqinfo(ntp);
oqa = otp->t_dqinfo->dqa_usrdquots;
nqa = ntp->t_dqinfo->dqa_usrdquots;
......@@ -155,15 +159,23 @@ xfs_trans_mod_dquot_byino(
uint field,
long delta)
{
xfs_mount_t *mp;
ASSERT(tp);
mp = tp->t_mountp;
if (!XFS_IS_QUOTA_ON(mp) ||
ip->i_ino == mp->m_sb.sb_uquotino ||
ip->i_ino == mp->m_sb.sb_gquotino)
return;
if (tp->t_dqinfo == NULL)
xfs_trans_alloc_dqinfo(tp);
if (XFS_IS_UQUOTA_ON(tp->t_mountp) && ip->i_udquot) {
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) {
(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
}
if (XFS_IS_GQUOTA_ON(tp->t_mountp) && ip->i_gdquot) {
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) {
(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}
}
......@@ -318,7 +330,7 @@ xfs_trans_dqlockedjoin(
* xfs_trans_apply_sb_deltas().
* Go thru all the dquots belonging to this transaction and modify the
* INCORE dquot to reflect the actual usages.
* Unreserve just the reservations done by this transaction
* Unreserve just the reservations done by this transaction.
* dquot is still left locked at exit.
*/
void
......@@ -332,6 +344,9 @@ xfs_trans_apply_dquot_deltas(
long totalbdelta;
long totalrtbdelta;
if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
ASSERT(tp->t_dqinfo);
qa = tp->t_dqinfo->dqa_usrdquots;
for (j = 0; j < 2; j++) {
......@@ -481,13 +496,15 @@ xfs_trans_apply_dquot_deltas(
#ifdef QUOTADEBUG
if (qtrx->qt_rtblk_res != 0)
printk("RT res %d for 0x%p\n",
(int) qtrx->qt_rtblk_res,
dqp);
cmn_err(CE_DEBUG, "RT res %d for 0x%p\n",
(int) qtrx->qt_rtblk_res, dqp);
#endif
ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
ASSERT(dqp->q_res_bcount >=
INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
ASSERT(dqp->q_res_icount >=
INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
ASSERT(dqp->q_res_rtbcount >=
INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
}
/*
* Do the group quotas next
......@@ -503,7 +520,7 @@ xfs_trans_apply_dquot_deltas(
* we simply throw those away, since that's the expected behavior
* when a transaction is curtailed without a commit.
*/
void
STATIC void
xfs_trans_unreserve_and_mod_dquots(
xfs_trans_t *tp)
{
......@@ -512,7 +529,9 @@ xfs_trans_unreserve_and_mod_dquots(
xfs_dqtrx_t *qtrx, *qa;
boolean_t locked;
ASSERT(tp->t_dqinfo);
if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
qa = tp->t_dqinfo->dqa_usrdquots;
for (j = 0; j < 2; j++) {
......@@ -604,8 +623,8 @@ xfs_trans_dqresv(
!INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) &&
XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
printk("BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?\n",
nblks, *resbcountp, hardlimit);
cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
" > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
if (nblks > 0) {
/*
......@@ -713,6 +732,7 @@ xfs_trans_dqresv(
int
xfs_trans_reserve_quota_bydquots(
xfs_trans_t *tp,
xfs_mount_t *mp,
xfs_dquot_t *udqp,
xfs_dquot_t *gdqp,
long nblks,
......@@ -721,6 +741,9 @@ xfs_trans_reserve_quota_bydquots(
{
int resvd;
if (! XFS_IS_QUOTA_ON(mp))
return (0);
if (tp && tp->t_dqinfo == NULL)
xfs_trans_alloc_dqinfo(tp);
......@@ -760,9 +783,10 @@ xfs_trans_reserve_quota_bydquots(
*
* Returns 0 on success, EDQUOT or other errors otherwise
*/
int
STATIC int
xfs_trans_reserve_quota_nblks(
xfs_trans_t *tp,
xfs_mount_t *mp,
xfs_inode_t *ip,
long nblks,
long ninos,
......@@ -770,6 +794,12 @@ xfs_trans_reserve_quota_nblks(
{
int error;
if (!XFS_IS_QUOTA_ON(mp))
return (0);
ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
#ifdef QUOTADEBUG
if (ip->i_udquot)
ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot));
......@@ -785,7 +815,7 @@ xfs_trans_reserve_quota_nblks(
/*
* Reserve nblks against these dquots, with trans as the mediator.
*/
error = xfs_trans_reserve_quota_bydquots(tp,
error = xfs_trans_reserve_quota_bydquots(tp, mp,
ip->i_udquot, ip->i_gdquot,
nblks, ninos,
type);
......@@ -836,17 +866,29 @@ xfs_trans_log_quotaoff_item(
lidp->lid_flags |= XFS_LID_DIRTY;
}
void
STATIC void
xfs_trans_alloc_dqinfo(
xfs_trans_t *tp)
{
(tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
}
void
STATIC void
xfs_trans_free_dqinfo(
xfs_trans_t *tp)
{
if (!tp->t_dqinfo)
return;
kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo);
(tp)->t_dqinfo = NULL;
}
xfs_dqtrxops_t xfs_trans_dquot_ops = {
.qo_dup_dqinfo = xfs_trans_dup_dqinfo,
.qo_free_dqinfo = xfs_trans_free_dqinfo,
.qo_mod_dquot_byino = xfs_trans_mod_dquot_byino,
.qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas,
.qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks,
.qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots,
.qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots,
};
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -66,6 +66,9 @@
#include <xfs_dir.h>
#include <xfs_dir2.h>
#include <xfs_imap.h>
#include <xfs_alloc.h>
#include <xfs_dmapi.h>
#include <xfs_quota.h>
#include <xfs_mount.h>
#include <xfs_alloc_btree.h>
#include <xfs_bmap_btree.h>
......@@ -77,17 +80,11 @@
#include <xfs_dir2_sf.h>
#include <xfs_dinode.h>
#include <xfs_inode.h>
#include <xfs_alloc.h>
#include <xfs_bmap.h>
#include <xfs_bit.h>
#include <xfs_rtalloc.h>
#include <xfs_error.h>
#include <xfs_quota.h>
#include <xfs_itable.h>
#include <xfs_dqblk.h>
#include <xfs_dquot_item.h>
#include <xfs_dquot.h>
#include <xfs_qm.h>
#include <xfs_rw.h>
#include <xfs_da_btree.h>
#include <xfs_dir_leaf.h>
......@@ -108,6 +105,5 @@
#include <xfs_trans_priv.h>
#include <xfs_trans_space.h>
#include <xfs_utils.h>
#include <xfs_dmapi.h>
#endif /* __XFS_H__ */
......@@ -197,10 +197,8 @@ xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
/*
* Attach the dquots to the inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if ((error = xfs_qm_dqattach(dp, 0)))
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
return (error);
}
/*
* If the inode doesn't have an attribute fork, add one.
......@@ -280,20 +278,14 @@ xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
}
xfs_ilock(dp, XFS_ILOCK_EXCL);
if (XFS_IS_QUOTA_ON(mp)) {
if (rsvd) {
error = xfs_trans_reserve_blkquota_force(args.trans,
dp, nblks);
} else {
error = xfs_trans_reserve_blkquota(args.trans,
dp, nblks);
}
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
return (error);
}
}
xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args.trans, dp);
......@@ -483,12 +475,9 @@ xfs_attr_remove(bhv_desc_t *bdp, char *name, int flags, struct cred *cred)
/*
* Attach the dquots to the inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if (XFS_NOT_DQATTACHED(mp, dp)) {
if ((error = xfs_qm_dqattach(dp, 0)))
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
return (error);
}
}
/*
* Start our first transaction of the day.
*
......
......@@ -2145,7 +2145,7 @@ xfs_bmap_alloc(
*/
mp = ap->ip->i_mount;
nullfb = ap->firstblock == NULLFSBLOCK;
rt = (ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata;
rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
#ifdef __KERNEL__
if (rt) {
......@@ -2467,14 +2467,10 @@ xfs_bmap_alloc(
* Adjust the disk quota also. This was reserved
* earlier.
*/
if (XFS_IS_QUOTA_ON(mp) &&
ap->ip->i_ino != mp->m_sb.sb_uquotino &&
ap->ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ?
XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT,
(long)ralen);
(long) ralen);
} else
ap->alen = 0;
#endif /* __KERNEL__ */
......@@ -2691,14 +2687,10 @@ xfs_bmap_alloc(
* Adjust the disk quota also. This was reserved
* earlier.
*/
if (XFS_IS_QUOTA_ON(mp) &&
ap->ip->i_ino != mp->m_sb.sb_uquotino &&
ap->ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ?
XFS_TRANS_DQ_DELBCOUNT :
XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
XFS_TRANS_DQ_BCOUNT,
(long)args.len);
(long) args.len);
} else {
ap->rval = NULLFSBLOCK;
ap->alen = 0;
......@@ -2755,10 +2747,7 @@ xfs_bmap_btree_to_extents(
return error;
xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
ip->i_d.di_nblocks--;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(tp, cbp);
if (cur->bc_bufs[0] == cbp)
cur->bc_bufs[0] = NULL;
......@@ -2854,9 +2843,6 @@ xfs_bmap_del_extent(
goto done;
do_fx = 0;
nblks = len * mp->m_sb.sb_rextsize;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
qfield = XFS_TRANS_DQ_RTBCOUNT;
}
/*
......@@ -2865,9 +2851,6 @@ xfs_bmap_del_extent(
else {
do_fx = 1;
nblks = del->br_blockcount;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
qfield = XFS_TRANS_DQ_BCOUNT;
}
/*
......@@ -3088,7 +3071,8 @@ xfs_bmap_del_extent(
* Adjust quota data.
*/
if (qfield)
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
/*
* Account for change in delayed indirect blocks.
* Nothing to do for disk quota accounting here.
......@@ -3239,10 +3223,7 @@ xfs_bmap_extents_to_btree(
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
ip->i_d.di_nblocks++;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
/*
* Fill in the child block.
......@@ -3385,11 +3366,8 @@ xfs_bmap_local_to_extents(
xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
if (XFS_IS_QUOTA_ON(args.mp) &&
ip->i_ino != args.mp->m_sb.sb_uquotino &&
ip->i_ino != args.mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
1L);
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
XFS_TRANS_DQ_BCOUNT, 1L);
flags |= XFS_ILOG_FEXT(whichfork);
} else
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
......@@ -3772,19 +3750,14 @@ xfs_bmap_add_attrfork(
XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
goto error0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (XFS_IS_QUOTA_ON(mp)) {
if (rsvd) {
error = xfs_trans_reserve_blkquota_force(tp, ip, blks);
} else {
error = xfs_trans_reserve_blkquota(tp, ip, blks);
}
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
return error;
}
}
if (XFS_IFORK_Q(ip))
goto error1;
if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
......@@ -4655,8 +4628,8 @@ xfs_bmapi(
cur = NULL;
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
ASSERT(wr && tp);
if ((error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
&logflags, whichfork)))
if ((error = xfs_bmap_local_to_extents(tp, ip,
firstblock, total, &logflags, whichfork)))
goto error0;
}
if (wr && *firstblock == NULLFSBLOCK) {
......@@ -4730,9 +4703,8 @@ xfs_bmapi(
* We return EDQUOT if we haven't allocated
* blks already inside this loop;
*/
if (XFS_IS_QUOTA_ON(ip->i_mount) &&
xfs_trans_reserve_blkquota(NULL, ip,
(long)alen)) {
if (XFS_TRANS_RESERVE_BLKQUOTA(
mp, NULL, ip, (long)alen)) {
if (n == 0) {
*nmap = 0;
ASSERT(cur == NULL);
......@@ -4740,12 +4712,10 @@ xfs_bmapi(
}
break;
}
if (xfs_mod_incore_sb(ip->i_mount,
XFS_SBS_FDBLOCKS,
if (xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
-(alen + indlen), rsvd)) {
if (XFS_IS_QUOTA_ON(ip->i_mount))
xfs_trans_unreserve_blkquota(
NULL, ip, (long)alen);
XFS_TRANS_UNRESERVE_BLKQUOTA(
mp, NULL, ip, (long)alen);
break;
}
ip->i_delayed_blks += alen;
......@@ -4808,15 +4778,11 @@ xfs_bmapi(
alen = bma.alen;
aoff = bma.off;
ASSERT(*firstblock == NULLFSBLOCK ||
XFS_FSB_TO_AGNO(ip->i_mount,
*firstblock) ==
XFS_FSB_TO_AGNO(ip->i_mount,
bma.firstblock) ||
XFS_FSB_TO_AGNO(mp, *firstblock) ==
XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
(flist->xbf_low &&
XFS_FSB_TO_AGNO(ip->i_mount,
*firstblock) <
XFS_FSB_TO_AGNO(ip->i_mount,
bma.firstblock)));
XFS_FSB_TO_AGNO(mp, *firstblock) <
XFS_FSB_TO_AGNO(mp, bma.firstblock)));
*firstblock = bma.firstblock;
if (cur)
cur->bc_private.b.firstblock =
......@@ -4824,7 +4790,7 @@ xfs_bmapi(
if (abno == NULLFSBLOCK)
break;
if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
cur = xfs_btree_init_cursor(ip->i_mount,
cur = xfs_btree_init_cursor(mp,
tp, NULL, 0, XFS_BTNUM_BMAP,
ip, whichfork);
cur->bc_private.b.firstblock =
......@@ -4941,7 +4907,7 @@ xfs_bmapi(
*/
ASSERT(mval->br_blockcount <= len);
if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
cur = xfs_btree_init_cursor(ip->i_mount,
cur = xfs_btree_init_cursor(mp,
tp, NULL, 0, XFS_BTNUM_BMAP,
ip, whichfork);
cur->bc_private.b.firstblock =
......@@ -5063,12 +5029,12 @@ xfs_bmapi(
if (cur) {
if (!error) {
ASSERT(*firstblock == NULLFSBLOCK ||
XFS_FSB_TO_AGNO(ip->i_mount, *firstblock) ==
XFS_FSB_TO_AGNO(ip->i_mount,
XFS_FSB_TO_AGNO(mp, *firstblock) ==
XFS_FSB_TO_AGNO(mp,
cur->bc_private.b.firstblock) ||
(flist->xbf_low &&
XFS_FSB_TO_AGNO(ip->i_mount, *firstblock) <
XFS_FSB_TO_AGNO(ip->i_mount,
XFS_FSB_TO_AGNO(mp, *firstblock) <
XFS_FSB_TO_AGNO(mp,
cur->bc_private.b.firstblock)));
*firstblock = cur->bc_private.b.firstblock;
}
......@@ -5378,16 +5344,11 @@ xfs_bunmapi(
ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
(int)del.br_blockcount, rsvd);
if (XFS_IS_QUOTA_ON(ip->i_mount)) {
ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
if (!isrt)
xfs_trans_unreserve_blkquota(NULL, ip,
(long)del.br_blockcount);
else
xfs_trans_unreserve_rtblkquota(NULL, ip,
(long)del.br_blockcount);
}
/* Unreserve our quota space */
XFS_TRANS_RESERVE_QUOTA_NBLKS(
mp, NULL, ip, -((long)del.br_blockcount), 0,
isrt ? XFS_QMOPT_RES_RTBLKS :
XFS_QMOPT_RES_REGBLKS);
ip->i_delayed_blks -= del.br_blockcount;
if (cur)
cur->bc_private.b.flags |=
......@@ -5556,8 +5517,7 @@ xfs_getbmap(
&& DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
&& whichfork == XFS_DATA_FORK) {
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp,
0, 0, 0, NULL);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, 0, 0, 0, NULL);
if (error)
return XFS_ERROR(error);
}
......@@ -5579,7 +5539,6 @@ xfs_getbmap(
ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
return XFS_ERROR(EINVAL);
if (whichfork == XFS_DATA_FORK) {
if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
prealloced = 1;
......@@ -5928,10 +5887,13 @@ xfs_check_block(
thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
xfs_bmbt, block, j, dmxr);
}
if (INT_GET(*thispa, ARCH_CONVERT) == INT_GET(*pp, ARCH_CONVERT)) {
printk("xfs_check_block: thispa(%d) == pp(%d) %Ld\n",
j, i, INT_GET(*thispa, ARCH_CONVERT));
panic("xfs_check_block: ptrs are equal in node\n");
if (INT_GET(*thispa, ARCH_CONVERT) ==
INT_GET(*pp, ARCH_CONVERT)) {
cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
__FUNCTION__, j, i,
INT_GET(*thispa, ARCH_CONVERT));
panic("%s: ptrs are equal in node\n",
__FUNCTION__);
}
}
}
......@@ -6089,12 +6051,13 @@ xfs_bmap_check_leaf_extents(
return;
error0:
printk("at error0\n");
cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
if (bp_release)
xfs_trans_brelse(NULL, bp);
error_norelse:
printk("xfs_bmap_check_leaf_extents: BAD after btree leaves for %d extents\n", i);
panic("xfs_bmap_check_leaf_extents: CORRUPTED BTREE OR SOMETHING");
cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
i, __FUNCTION__);
panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
return;
}
#endif
......
......@@ -629,8 +629,8 @@ xfs_bmbt_delrec(
xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
XFS_BMAP_BTREE_REF))) {
INT_GET(left->bb_rightsib, ARCH_CONVERT),
0, &rrbp, XFS_BMAP_BTREE_REF))) {
XFS_BMBT_TRACE_CURSOR(cur, ERROR);
goto error0;
}
......@@ -646,10 +646,7 @@ xfs_bmbt_delrec(
cur->bc_private.b.flist, mp);
cur->bc_private.b.ip->i_d.di_nblocks--;
xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
if (XFS_IS_QUOTA_ON(mp) &&
cur->bc_private.b.ip->i_ino != mp->m_sb.sb_uquotino &&
cur->bc_private.b.ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(cur->bc_tp, cur->bc_private.b.ip,
XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(cur->bc_tp, rbp);
if (bp != lbp) {
......@@ -988,11 +985,8 @@ xfs_bmbt_killroot(
xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
cur->bc_private.b.flist, cur->bc_mp);
ip->i_d.di_nblocks--;
if (XFS_IS_QUOTA_ON(cur->bc_mp) &&
ip->i_ino != cur->bc_mp->m_sb.sb_uquotino &&
ip->i_ino != cur->bc_mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(cur->bc_tp, ip, XFS_TRANS_DQ_BCOUNT,
-1L);
XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(cur->bc_tp, cbp);
cur->bc_bufs[level - 1] = NULL;
INT_MOD(block->bb_level, ARCH_CONVERT, -1);
......@@ -1589,10 +1583,7 @@ xfs_bmbt_split(
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
if (XFS_IS_QUOTA_ON(args.mp) &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_uquotino &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
XFS_TRANS_DQ_BCOUNT, 1L);
rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
right = XFS_BUF_TO_BMBT_BLOCK(rbp);
......@@ -2390,10 +2381,7 @@ xfs_bmbt_newroot(
cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
if (XFS_IS_QUOTA_ON(args.mp) &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_uquotino &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
XFS_TRANS_DQ_BCOUNT, 1L);
bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
......
......@@ -32,11 +32,6 @@
#ifndef __XFS_DMAPI_H__
#define __XFS_DMAPI_H__
#ifdef CONFIG_XFS_DMAPI
#include <dmapi/dmapi.h>
#include <dmapi/dmapi_kern.h>
/* Values used to define the on-disk version of dm_attrname_t. All
* on-disk attribute names start with the 8-byte string "SGI_DMI_".
*
......@@ -48,6 +43,42 @@
#define DMATTR_PREFIXLEN 8
#define DMATTR_PREFIXSTRING "SGI_DMI_"
typedef enum {
DM_EVENT_INVALID = -1,
DM_EVENT_CANCEL = 0, /* not supported */
DM_EVENT_MOUNT = 1,
DM_EVENT_PREUNMOUNT = 2,
DM_EVENT_UNMOUNT = 3,
DM_EVENT_DEBUT = 4, /* not supported */
DM_EVENT_CREATE = 5,
DM_EVENT_CLOSE = 6, /* not supported */
DM_EVENT_POSTCREATE = 7,
DM_EVENT_REMOVE = 8,
DM_EVENT_POSTREMOVE = 9,
DM_EVENT_RENAME = 10,
DM_EVENT_POSTRENAME = 11,
DM_EVENT_LINK = 12,
DM_EVENT_POSTLINK = 13,
DM_EVENT_SYMLINK = 14,
DM_EVENT_POSTSYMLINK = 15,
DM_EVENT_READ = 16,
DM_EVENT_WRITE = 17,
DM_EVENT_TRUNCATE = 18,
DM_EVENT_ATTRIBUTE = 19,
DM_EVENT_DESTROY = 20,
DM_EVENT_NOSPACE = 21,
DM_EVENT_USER = 22,
DM_EVENT_MAX = 23
} dm_eventtype_t;
#define HAVE_DM_EVENTTYPE_T
typedef enum {
DM_RIGHT_NULL,
DM_RIGHT_SHARED,
DM_RIGHT_EXCL
} dm_right_t;
#define HAVE_DM_RIGHT_T
/* Defines for determining if an event message should be sent. */
#define DM_EVENT_ENABLED(vfsp, ip, event) ( \
unlikely ((vfsp)->vfs_flag & VFS_DMI) && \
......@@ -58,23 +89,6 @@
#define DM_EVENT_ENABLED_IO(vfsp, io, event) ( \
unlikely ((vfsp)->vfs_flag & VFS_DMI) && \
( ((io)->io_dmevmask & (1 << event)) || \
((io)->io_mount->m_dmevmask & (1 << event)) ) \
)
/*
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
/* events valid in dm_set_eventlist() when called with a filesystem handle.
These events are not persistent.
*/
#define DM_XFS_VALID_FS_EVENTS ( \
(1 << DM_EVENT_PREUNMOUNT) | \
......@@ -120,7 +134,6 @@
(1 << DM_EVENT_ATTRIBUTE) | \
(1 << DM_EVENT_DESTROY) )
/* Events supported by the XFS filesystem. */
#define DM_XFS_SUPPORTED_EVENTS ( \
(1 << DM_EVENT_MOUNT) | \
......@@ -144,156 +157,34 @@
(1 << DM_EVENT_DESTROY) )
extern int
xfs_dm_get_fsys_vector(
bhv_desc_t *bdp,
caddr_t vecrq);
extern int
xfs_dm_send_data_event(
dm_eventtype_t event,
bhv_desc_t *bdp,
xfs_off_t offset,
size_t length,
int flags,
vrwlock_t *locktype);
extern int
xfs_dm_send_mmap_event(
struct vm_area_struct *vma,
unsigned int wantflag);
#else /* CONFIG_XFS_DMAPI */
/*
* Flags needed to build with dmapi disabled.
*/
typedef enum {
DM_EVENT_INVALID = -1,
DM_EVENT_CANCEL = 0, /* not supported */
DM_EVENT_MOUNT = 1,
DM_EVENT_PREUNMOUNT = 2,
DM_EVENT_UNMOUNT = 3,
DM_EVENT_DEBUT = 4, /* not supported */
DM_EVENT_CREATE = 5,
DM_EVENT_CLOSE = 6, /* not supported */
DM_EVENT_POSTCREATE = 7,
DM_EVENT_REMOVE = 8,
DM_EVENT_POSTREMOVE = 9,
DM_EVENT_RENAME = 10,
DM_EVENT_POSTRENAME = 11,
DM_EVENT_LINK = 12,
DM_EVENT_POSTLINK = 13,
DM_EVENT_SYMLINK = 14,
DM_EVENT_POSTSYMLINK = 15,
DM_EVENT_READ = 16,
DM_EVENT_WRITE = 17,
DM_EVENT_TRUNCATE = 18,
DM_EVENT_ATTRIBUTE = 19,
DM_EVENT_DESTROY = 20,
DM_EVENT_NOSPACE = 21,
DM_EVENT_USER = 22,
DM_EVENT_MAX = 23
} dm_eventtype_t;
typedef enum {
DM_RIGHT_NULL,
DM_RIGHT_SHARED,
DM_RIGHT_EXCL
} dm_right_t;
/*
* Defines for determining if an event message should be sent.
*/
#define DM_EVENT_ENABLED(vfsp, ip, event) 0
#define DM_EVENT_ENABLED_IO(vfsp, io, event) 0
/*
* Stubbed out DMAPI delay macros.
*/
#define FILP_DELAY_FLAG(filp) 0
#define AT_DELAY_FLAG(f) 0
/*
* Events supported by the XFS filesystem.
*/
#define DM_XFS_VALID_FS_EVENTS 0
#define DM_XFS_VALID_FILE_EVENTS 0
#define DM_XFS_VALID_DIRECTORY_EVENTS 0
#define DM_XFS_SUPPORTED_EVENTS 0
/*
* Dummy definitions used for the flags field on dm_send_*_event().
* Definitions used for the flags field on dm_send_*_event().
*/
#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
/*
* Stubs for XFS DMAPI utility routines.
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
static __inline int
xfs_dm_send_data_event(
dm_eventtype_t event,
bhv_desc_t *bdp,
xfs_off_t offset,
size_t length,
int flags,
vrwlock_t *locktype)
{
return ENOSYS;
}
static __inline int
xfs_dm_send_mmap_event(
struct vm_area_struct *vma,
unsigned int wantflag)
{
return 0;
}
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
/*
* Stubs for routines needed for the X/Open version of DMAPI.
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
static __inline int
dm_send_destroy_event(
bhv_desc_t *bdp,
dm_right_t vp_right)
{
return ENOSYS;
}
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
static __inline int
dm_send_namesp_event(
dm_eventtype_t event,
bhv_desc_t *bdp1,
dm_right_t vp1_right,
bhv_desc_t *bdp2,
dm_right_t vp2_right,
char *name1,
char *name2,
mode_t mode,
int retcode,
int flags)
{
return ENOSYS;
}
extern struct bhv_vfsops xfs_dmops;
static __inline void
dm_send_unmount_event(
vfs_t *vfsp,
vnode_t *vp,
dm_right_t vfsp_right,
mode_t mode,
int retcode,
int flags)
{
}
extern void xfs_dm_init(void);
extern void xfs_dm_exit(void);
#endif /* CONFIG_XFS_DMAPI */
#endif /* __XFS_DMAPI_H__ */
......@@ -29,103 +29,14 @@
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
STATIC int
xfs_dm_parseargs(
struct bhv_desc *bhv,
char *options,
struct xfs_mount_args *args,
int update)
{
size_t length;
char *local_options = options;
char *this_char;
int error;
while ((this_char = strsep(&local_options, ",")) != NULL) {
length = strlen(this_char);
if (local_options)
length++;
if (!strcmp(this_char, MNTOPT_DMAPI)) {
args->flags |= XFSMNT_DMAPI;
} else if (!strcmp(this_char, MNTOPT_XDSM)) {
args->flags |= XFSMNT_DMAPI;
} else {
if (local_options)
*(local_options-1) = ',';
continue;
}
while (length--)
*this_char++ = ',';
}
PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
if (!error && (args->flags & XFSMNT_DMAPI) && (*args->mtpt == '\0'))
error = EINVAL;
if (!error && !update && !(args->flags & XFSMNT_DMAPI))
bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_DM);
return error;
}
STATIC int
xfs_dm_showargs(
struct bhv_desc *bhv,
struct seq_file *m)
{
struct vfs *vfsp = bhvtovfs(bhv);
int error;
if (vfsp->vfs_flag & VFS_DMI)
seq_puts(m, "," MNTOPT_DMAPI);
PVFS_SHOWARGS(BHV_NEXT(bhv), m, error);
return error;
}
STATIC int
xfs_dm_mount(
struct bhv_desc *bhv,
struct xfs_mount_args *args,
struct cred *cr)
{
struct bhv_desc *rootbdp;
struct vnode *rootvp;
struct vfs *vfsp;
int error = 0;
PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
if (error)
return error;
if (args->flags & XFSMNT_DMAPI) {
vfsp = bhvtovfs(bhv);
VFS_ROOT(vfsp, &rootvp, error);
if (!error) {
vfsp->vfs_flag |= VFS_DMI;
rootbdp = vn_bhv_lookup_unlocked(
VN_BHV_HEAD(rootvp), &xfs_vnodeops);
VN_RELE(rootvp);
error = dm_send_mount_event(vfsp, DM_RIGHT_NULL, NULL,
DM_RIGHT_NULL, rootbdp, DM_RIGHT_NULL,
args->mtpt, args->fsname);
}
}
return error;
}
vfsops_t xfs_dmops_xfs = {
BHV_IDENTITY_INIT(VFS_BHV_DM, VFS_POSITION_DM),
.vfs_mount = xfs_dm_mount,
.vfs_parseargs = xfs_dm_parseargs,
.vfs_showargs = xfs_dm_showargs,
.vfs_dmapiops = xfs_dm_get_fsys_vector,
#ifndef CONFIG_XFS_DMAPI
xfs_dmops_t xfs_dmcore_xfs = {
.xfs_send_data = (xfs_send_data_t)fs_nosys,
.xfs_send_mmap = (xfs_send_mmap_t)fs_noerr,
.xfs_send_destroy = (xfs_send_destroy_t)fs_nosys,
.xfs_send_namesp = (xfs_send_namesp_t)fs_nosys,
.xfs_send_unmount = (xfs_send_unmount_t)fs_noval,
};
#endif /* CONFIG_XFS_DMAPI */
......@@ -591,9 +591,7 @@ xfs_ireclaim(xfs_inode_t *ip)
* Release dquots (and their references) if any. An inode may escape
* xfs_inactive and get here via vn_alloc->vn_reclaim path.
*/
if (ip->i_udquot || ip->i_gdquot) {
xfs_qm_dqdettach_inode(ip);
}
XFS_QM_DQDETACH(ip->i_mount, ip);
/*
* Pull our behavior descriptor from the vnode chain.
......
......@@ -136,16 +136,15 @@ xlog_bwrite(
/*
* check log record header for recovery
*/
static void
xlog_header_check_dump(xfs_mount_t *mp, xlog_rec_header_t *head)
{
int b;
printk("xlog_header_check_dump:\n SB : uuid = ");
printk("%s: SB : uuid = ", __FUNCTION__);
for (b=0;b<16;b++) printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]);
printk(", fmt = %d\n",XLOG_FMT);
printk(" log: uuid = ");
printk(" log : uuid = ");
for (b=0;b<16;b++) printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]);
printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
......@@ -1813,7 +1812,6 @@ xlog_recover_do_reg_buffer(xfs_mount_t *mp,
*/
error = 0;
if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
/* OK, if this returns ENOSYS */
error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
......@@ -1832,6 +1830,120 @@ xlog_recover_do_reg_buffer(xfs_mount_t *mp,
ASSERT(i == item->ri_total);
} /* xlog_recover_do_reg_buffer */
/*
* Do some primitive error checking on ondisk dquot data structures.
*/
int
xfs_qm_dqcheck(
xfs_disk_dquot_t *ddq,
xfs_dqid_t id,
uint type, /* used only when IO_dorepair is true */
uint flags,
char *str)
{
xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
int errs = 0;
/*
* We can encounter an uninitialized dquot buffer for 2 reasons:
* 1. If we crash while deleting the quotainode(s), and those blks got
* used for user data. This is because we take the path of regular
* file deletion; however, the size field of quotainodes is never
* updated, so all the tricks that we play in itruncate_finish
* don't quite matter.
*
* 2. We don't play the quota buffers when there's a quotaoff logitem.
* But the allocation will be replayed so we'll end up with an
* uninitialized quota block.
*
* This is all fine; things are still consistent, and we haven't lost
* any quota information. Just don't complain about bad dquot blks.
*/
if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
str, id,
INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC);
errs++;
}
if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
str, id,
INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_VERSION);
errs++;
}
if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER &&
INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
str, id, INT_GET(ddq->d_flags, ARCH_CONVERT));
errs++;
}
if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
"%s : ondisk-dquot 0x%x, ID mismatch: "
"0x%x expected, found id 0x%x",
str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT));
errs++;
}
if (! errs) {
if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) &&
INT_GET(ddq->d_bcount, ARCH_CONVERT) >=
INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) {
if (INT_ISZERO(ddq->d_btimer, ARCH_CONVERT) &&
!INT_ISZERO(ddq->d_id, ARCH_CONVERT)) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
"%s : Dquot ID 0x%x (0x%x) "
"BLK TIMER NOT STARTED",
str, (int)
INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
errs++;
}
}
if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) &&
INT_GET(ddq->d_icount, ARCH_CONVERT) >=
INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) {
if (INT_ISZERO(ddq->d_itimer, ARCH_CONVERT) &&
!INT_ISZERO(ddq->d_id, ARCH_CONVERT)) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
"%s : Dquot ID 0x%x (0x%x) "
"INODE TIMER NOT STARTED",
str, (int)
INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
errs++;
}
}
}
if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
return errs;
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
/*
* Typically, a repair is only requested by quotacheck.
*/
ASSERT(id != -1);
ASSERT(flags & XFS_QMOPT_DQREPAIR);
memset(d, 0, sizeof(xfs_dqblk_t));
INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id);
INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type);
return errs;
}
/*
* Perform a dquot buffer recovery.
......@@ -2335,8 +2447,6 @@ xlog_recover_do_dquot_trans(xlog_t *log,
dq_f->qlf_id,
0, XFS_QMOPT_DOWARN,
"xlog_recover_do_dquot_trans (log copy)"))) {
if (error == ENOSYS)
return (0);
return XFS_ERROR(EIO);
}
ASSERT(dq_f->qlf_len == 1);
......@@ -2923,8 +3033,6 @@ xlog_recover_process_iunlinks(xlog_t *log)
/*
* Prevent any DMAPI event from being sent while in this function.
* Not a problem for xfs since the file system isn't mounted
* yet. It is a problem for cxfs recovery.
*/
mp_dmevmask = mp->m_dmevmask;
mp->m_dmevmask = 0;
......@@ -2982,10 +3090,7 @@ xlog_recover_process_iunlinks(xlog_t *log)
* Prevent any DMAPI event from
* being sent when the
* reference on the inode is
* dropped. Not a problem for
* xfs since the file system
* isn't mounted yet. It is a
* problem for cxfs recovery.
* dropped.
*/
ip->i_d.di_dmevmask = 0;
......
......@@ -32,7 +32,6 @@
#include <xfs.h>
STATIC void xfs_mount_reset_sbqflags(xfs_mount_t *);
STATIC void xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int xfs_uuid_mount(xfs_mount_t *);
STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
......@@ -154,13 +153,11 @@ xfs_mount_free(
spinlock_destroy(&mp->m_sb_lock);
mutex_destroy(&mp->m_ilock);
freesema(&mp->m_growlock);
if (mp->m_quotainfo)
XFS_QM_DONE(mp);
if (mp->m_fsname != NULL) {
if (mp->m_fsname != NULL)
kmem_free(mp->m_fsname, mp->m_fsname_len);
}
if (mp->m_quotainfo != NULL) {
xfs_qm_unmount_quotadestroy(mp);
}
if (remove_bhv) {
struct vfs *vfsp = XFS_MTOVFS(mp);
......@@ -606,10 +603,8 @@ xfs_mountfs(
vmap_t vmap;
xfs_daddr_t d;
__uint64_t ret64;
uint quotaflags, quotaondisk;
uint uquotaondisk = 0, gquotaondisk = 0;
boolean_t needquotamount;
__int64_t update_flags;
uint quotamount, quotaflags;
int agno, noio;
int uuid_mounted = 0;
int error = 0;
......@@ -946,54 +941,22 @@ xfs_mountfs(
ASSERT(rip != NULL);
rvp = XFS_ITOV(rip);
VMAP(rvp, vmap);
if (unlikely((rip->i_d.di_mode & IFMT) != IFDIR)) {
cmn_err(CE_WARN, "XFS: corrupted root inode");
VMAP(rvp, vmap);
prdev("Root inode %llu is not a directory",
mp->m_dev, (unsigned long long)rip->i_ino);
xfs_iunlock(rip, XFS_ILOCK_EXCL);
VN_RELE(rvp);
vn_purge(rvp, &vmap);
XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
mp);
error = XFS_ERROR(EFSCORRUPTED);
goto error3;
goto error4;
}
mp->m_rootip = rip; /* save it */
xfs_iunlock(rip, XFS_ILOCK_EXCL);
quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT);
if (quotaondisk) {
uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
}
/*
* If the device itself is read-only, we can't allow
* the user to change the state of quota on the mount -
* this would generate a transaction on the ro device,
* which would lead to an I/O error and shutdown
*/
if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
(!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
(gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
(!gquotaondisk && XFS_IS_GQUOTA_ON(mp))) &&
xfs_dev_is_read_only(mp, "changing quota state")) {
cmn_err(CE_WARN,
"XFS: please mount with%s%s%s.",
(!quotaondisk ? "out quota" : ""),
(uquotaondisk ? " usrquota" : ""),
(gquotaondisk ? " grpquota" : ""));
VN_RELE(rvp);
vn_remove(rvp);
error = XFS_ERROR(EPERM);
goto error3;
}
/*
* Initialize realtime inode pointers in the mount structure
*/
......@@ -1002,10 +965,7 @@ xfs_mountfs(
* Free up the root inode.
*/
cmn_err(CE_WARN, "XFS: failed to read RT inodes");
VMAP(rvp, vmap);
VN_RELE(rvp);
vn_purge(rvp, &vmap);
goto error3;
goto error4;
}
/*
......@@ -1015,41 +975,11 @@ xfs_mountfs(
if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
xfs_mount_log_sbunit(mp, update_flags);
quotaflags = 0;
needquotamount = B_FALSE;
/*
* Figure out if we'll need to do a quotacheck.
*/
if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
/*
* Call mount_quotas at this point only if we won't have to do
* a quotacheck.
*/
if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
/*
* If the xfs quota code isn't installed,
* we have to reset the quotachk'd bit.
* If an error occurred, qm_mount_quotas code
* has already disabled quotas. So, just finish
* mounting, and get on with the boring life
* without disk quotas.
*/
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
} else {
/*
* Clear the quota flags, but remember them. This
* is so that the quota code doesn't get invoked
* before we're ready. This can happen when an
* inode goes inactive and wants to free blocks,
* or via xfs_log_mount_finish.
* Initialise the XFS quota management subsystem for this mount
*/
quotaflags = mp->m_qflags;
mp->m_qflags = 0;
needquotamount = B_TRUE;
}
}
if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
goto error4;
/*
* Finish recovering the file system. This part needed to be
......@@ -1059,30 +989,23 @@ xfs_mountfs(
error = xfs_log_mount_finish(mp, mfsi_flags);
if (error) {
cmn_err(CE_WARN, "XFS: log mount finish failed");
goto error3;
}
if (needquotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
goto error4;
}
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
if (! (XFS_IS_QUOTA_ON(mp)))
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
else
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
#endif
#ifdef QUOTADEBUG
if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
#endif
/*
* Complete the quota initialisation, post-log-replay component.
*/
if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags)))
goto error4;
return (0);
return 0;
error4:
/*
* Free up the root inode.
*/
VN_RELE(rvp);
vn_purge(rvp, &vmap);
error3:
xfs_log_unmount_dealloc(mp);
error2:
......@@ -1112,25 +1035,14 @@ int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
struct vfs *vfsp = XFS_MTOVFS(mp);
int ndquots;
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
int64_t fsid;
#endif
xfs_iflush_all(mp, XFS_FLUSH_ALL);
/*
* Purge the dquot cache.
* None of the dquots should really be busy at this point.
*/
if (mp->m_quotainfo) {
while ((ndquots = xfs_qm_dqpurge_all(mp,
XFS_QMOPT_UQUOTA|
XFS_QMOPT_GQUOTA|
XFS_QMOPT_UMOUNTING))) {
delay(ndquots * 10);
}
}
XFS_QM_DQPURGEALL(mp,
XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
/*
* Flush out the log synchronously so that we know for sure
......@@ -1645,47 +1557,6 @@ xfs_uuid_unmount(xfs_mount_t *mp)
mutex_unlock(&xfs_uuidtabmon);
}
/*
* When xfsquotas isn't installed and the superblock had quotas, we need to
* clear the quotaflags from superblock.
*/
STATIC void
xfs_mount_reset_sbqflags(
xfs_mount_t *mp)
{
xfs_trans_t *tp;
unsigned long s;
mp->m_qflags = 0;
/*
* It is OK to look at sb_qflags here in mount path,
* without SB_LOCK.
*/
if (mp->m_sb.sb_qflags == 0)
return;
s = XFS_SB_LOCK(mp);
mp->m_sb.sb_qflags = 0;
XFS_SB_UNLOCK(mp, s);
/*
* if the fs is readonly, let the incore superblock run
* with quotas off but don't flush the update out to disk
*/
if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
return;
#ifdef QUOTADEBUG
xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT)) {
xfs_trans_cancel(tp, 0);
return;
}
xfs_mod_sb(tp, XFS_SB_QFLAGS);
(void)xfs_trans_commit(tp, 0, NULL);
}
/*
* Used to log changes to the superblock unit and width fields which could
* be altered by the mount options. Only the first superblock is updated.
......
......@@ -75,7 +75,6 @@ struct xfs_ihash;
struct xfs_chash;
struct xfs_inode;
struct xfs_perag;
struct xfs_quotainfo;
struct xfs_iocore;
struct xfs_bmbt_irec;
struct xfs_bmap_free;
......@@ -87,12 +86,117 @@ struct xfs_bmap_free;
#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock)
#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s)
/*
* Prototypes and functions for I/O core modularization.
* Prototypes and functions for the Data Migration subsystem.
*/
typedef int (*xfs_send_data_t)(int, struct bhv_desc *,
xfs_off_t, size_t, int, vrwlock_t *);
typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
typedef int (*xfs_send_destroy_t)(struct bhv_desc *, dm_right_t);
typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct bhv_desc *,
dm_right_t, struct bhv_desc *, dm_right_t,
char *, char *, mode_t, int, int);
typedef void (*xfs_send_unmount_t)(struct vfs *, struct vnode *,
dm_right_t, mode_t, int, int);
typedef struct xfs_dmops {
xfs_send_data_t xfs_send_data;
xfs_send_mmap_t xfs_send_mmap;
xfs_send_destroy_t xfs_send_destroy;
xfs_send_namesp_t xfs_send_namesp;
xfs_send_unmount_t xfs_send_unmount;
} xfs_dmops_t;
#define XFS_SEND_DATA(mp, ev,bdp,off,len,fl,lock) \
(*(mp)->m_dm_ops.xfs_send_data)(ev,bdp,off,len,fl,lock)
#define XFS_SEND_MMAP(mp, vma,fl) \
(*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl)
#define XFS_SEND_DESTROY(mp, bdp,right) \
(*(mp)->m_dm_ops.xfs_send_destroy)(bdp,right)
#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
(*(mp)->m_dm_ops.xfs_send_namesp)(ev,b1,r1,b2,r2,n1,n2,mode,rval,fl)
#define XFS_SEND_UNMOUNT(mp, vfsp,vp,right,mode,rval,fl) \
(*(mp)->m_dm_ops.xfs_send_unmount)(vfsp,vp,right,mode,rval,fl)
/*
* Prototypes and functions for the Quota Management subsystem.
*/
struct flid;
struct buf;
struct xfs_dquot;
struct xfs_dqtrxops;
struct xfs_quotainfo;
typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
typedef int (*xfs_qmunmount_t)(struct xfs_mount *);
typedef void (*xfs_qmdone_t)(struct xfs_mount *);
typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint);
typedef void (*xfs_dqdetach_t)(struct xfs_inode *);
typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint);
typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *,
struct xfs_inode *, uid_t, gid_t, uint,
struct xfs_dquot **, struct xfs_dquot **);
typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *);
typedef int (*xfs_dqvoprename_t)(struct xfs_inode **);
typedef struct xfs_dquot * (*xfs_dqvopchown_t)(
struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot **, struct xfs_dquot *);
typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *, uint);
typedef struct xfs_qmops {
xfs_qminit_t xfs_qminit;
xfs_qmdone_t xfs_qmdone;
xfs_qmmount_t xfs_qmmount;
xfs_qmunmount_t xfs_qmunmount;
xfs_dqrele_t xfs_dqrele;
xfs_dqattach_t xfs_dqattach;
xfs_dqdetach_t xfs_dqdetach;
xfs_dqpurgeall_t xfs_dqpurgeall;
xfs_dqvopalloc_t xfs_dqvopalloc;
xfs_dqvopcreate_t xfs_dqvopcreate;
xfs_dqvoprename_t xfs_dqvoprename;
xfs_dqvopchown_t xfs_dqvopchown;
xfs_dqvopchownresv_t xfs_dqvopchownresv;
struct xfs_dqtrxops *xfs_dqtrxops;
} xfs_qmops_t;
#define XFS_QM_INIT(mp, mnt, fl) \
(*(mp)->m_qm_ops.xfs_qminit)(mp, mnt, fl)
#define XFS_QM_MOUNT(mp, mnt, fl) \
(*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl)
#define XFS_QM_UNMOUNT(mp) \
(*(mp)->m_qm_ops.xfs_qmunmount)(mp)
#define XFS_QM_DONE(mp) \
(*(mp)->m_qm_ops.xfs_qmdone)(mp)
#define XFS_QM_DQRELE(mp, dq) \
(*(mp)->m_qm_ops.xfs_dqrele)(dq)
#define XFS_QM_DQATTACH(mp, ip, fl) \
(*(mp)->m_qm_ops.xfs_dqattach)(ip, fl)
#define XFS_QM_DQDETACH(mp, ip) \
(*(mp)->m_qm_ops.xfs_dqdetach)(ip)
#define XFS_QM_DQPURGEALL(mp, fl) \
(*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl)
#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, fl, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, fl, dq1, dq2)
#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopcreate)(tp, ip, dq1, dq2)
#define XFS_QM_DQVOPRENAME(mp, ip) \
(*(mp)->m_qm_ops.xfs_dqvoprename)(ip)
#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \
(*(mp)->m_qm_ops.xfs_dqvopchown)(tp, ip, dqp, dq)
#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \
(*(mp)->m_qm_ops.xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl)
/*
* Prototypes and functions for I/O core modularization.
*/
typedef int (*xfs_ioinit_t)(struct vfs *,
struct xfs_mount_args *, int);
......@@ -137,52 +241,38 @@ typedef struct xfs_ioops {
xfs_iodone_t xfs_iodone;
} xfs_ioops_t;
#define XFS_IOINIT(vfsp, args, flags) \
(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \
(*(mp)->m_io_ops.xfs_bmapi_func) \
(trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
#define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \
(*(mp)->m_io_ops.xfs_bmap_eof_func) \
((io)->io_obj, endoff, whichfork, eof)
#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\
(*(mp)->m_io_ops.xfs_iomap_write_direct) \
((io)->io_obj, offset, count, flags, mval, nmap, found)
#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_delay) \
((io)->io_obj, offset, count, flags, mval, nmap)
#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
((io)->io_obj, mval, nmap)
#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
((io)->io_obj, offset, count)
#define XFS_LCK_MAP_SHARED(mp, io) \
(*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj)
#define XFS_ILOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode)
#define XFS_ILOCK_NOWAIT(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode)
#define XFS_IUNLOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode)
#define XFS_ILOCK_DEMOTE(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode)
#define XFS_SIZE(mp, io) \
(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)
#define XFS_IODONE(vfsp) \
(*(mp)->m_io_ops.xfs_iodone)(vfsp)
......@@ -284,13 +374,9 @@ typedef struct xfs_mount {
int m_chsize; /* size of next field */
struct xfs_chash *m_chash; /* fs private inode per-cluster
* hash table */
struct xfs_dmops m_dm_ops; /* vector of DMI ops */
struct xfs_qmops m_qm_ops; /* vector of XQM ops */
struct xfs_ioops m_io_ops; /* vector of I/O ops */
struct xfs_expinfo *m_expinfo; /* info to export to other
cells. */
uint64_t m_shadow_pinmask;
/* which bits matter in rpc
log item pin masks */
uint m_cxfstype; /* mounted shared, etc. */
lock_t m_freeze_lock; /* Lock for m_frozen */
uint m_frozen; /* FS frozen for shutdown or
* snapshot */
......@@ -482,11 +568,10 @@ extern void xfs_check_frozen(xfs_mount_t *, bhv_desc_t *, int);
extern struct vfsops xfs_vfsops;
extern struct vnodeops xfs_vnodeops;
extern struct xfs_dmops xfs_dmcore_xfs;
extern struct xfs_qmops xfs_qmcore_xfs;
extern struct xfs_ioops xfs_iocore_xfs;
extern struct vfsops xfs_qmops_xfs;
extern struct vfsops xfs_dmops_xfs;
extern int xfs_init(void);
extern void xfs_cleanup(void);
......
......@@ -29,154 +29,32 @@
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#define MNTOPT_QUOTA "quota" /* disk quotas (user) */
#define MNTOPT_NOQUOTA "noquota" /* no quotas */
#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
STATIC int
xfs_qm_parseargs(
struct bhv_desc *bhv,
char *options,
struct xfs_mount_args *args,
int update)
{
size_t length;
char *local_options = options;
char *this_char;
int error;
int referenced = update;
while ((this_char = strsep(&local_options, ",")) != NULL) {
length = strlen(this_char);
if (local_options)
length++;
if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
referenced = update;
} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
!strcmp(this_char, MNTOPT_UQUOTA) ||
!strcmp(this_char, MNTOPT_USRQUOTA)) {
args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
!strcmp(this_char, MNTOPT_UQUOTANOENF)) {
args->flags |= XFSMNT_UQUOTA;
args->flags &= ~XFSMNT_UQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
!strcmp(this_char, MNTOPT_GRPQUOTA)) {
args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
args->flags |= XFSMNT_GQUOTA;
args->flags &= ~XFSMNT_GQUOTAENF;
referenced = 1;
} else {
if (local_options)
*(local_options-1) = ',';
continue;
}
while (length--)
*this_char++ = ',';
}
PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
if (!error && !referenced)
bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
return error;
}
STATIC int
xfs_qm_showargs(
struct bhv_desc *bhv,
struct seq_file *m)
{
struct vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
if (mp->m_qflags & XFS_UQUOTA_ACCT) {
(mp->m_qflags & XFS_UQUOTA_ENFD) ?
seq_puts(m, "," MNTOPT_USRQUOTA) :
seq_puts(m, "," MNTOPT_UQUOTANOENF);
}
if (mp->m_qflags & XFS_GQUOTA_ACCT) {
(mp->m_qflags & XFS_GQUOTA_ENFD) ?
seq_puts(m, "," MNTOPT_GRPQUOTA) :
seq_puts(m, "," MNTOPT_GQUOTANOENF);
}
if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT)))
seq_puts(m, "," MNTOPT_NOQUOTA);
PVFS_SHOWARGS(BHV_NEXT(bhv), m, error);
return error;
}
STATIC int
xfs_qm_mount(
struct bhv_desc *bhv,
struct xfs_mount_args *args,
struct cred *cr)
{
struct vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA))
xfs_qm_mount_quotainit(mp, args->flags);
PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
return error;
}
STATIC int
xfs_qm_syncall(
struct bhv_desc *bhv,
int flags,
cred_t *credp)
#ifndef CONFIG_XFS_QUOTA
STATIC struct xfs_dquot *
xfs_dqvopchown_default(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_dquot **dqp,
struct xfs_dquot *dq)
{
struct vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
/*
* Get the Quota Manager to flush the dquots.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if ((error = xfs_qm_sync(mp, flags))) {
/*
* If we got an IO error, we will be shutting down.
* So, there's nothing more for us to do here.
*/
ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
if (XFS_FORCED_SHUTDOWN(mp)) {
return XFS_ERROR(error);
}
}
}
PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error);
return error;
return NULL;
}
vfsops_t xfs_qmops_xfs = {
BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM),
.vfs_parseargs = xfs_qm_parseargs,
.vfs_showargs = xfs_qm_showargs,
.vfs_mount = xfs_qm_mount,
.vfs_sync = xfs_qm_syncall,
.vfs_quotactl = xfs_qm_quotactl,
xfs_qmops_t xfs_qmcore_xfs = {
.xfs_qminit = (xfs_qminit_t) fs_noerr,
.xfs_qmdone = (xfs_qmdone_t) fs_noerr,
.xfs_qmmount = (xfs_qmmount_t) fs_noerr,
.xfs_qmunmount = (xfs_qmunmount_t) fs_noerr,
.xfs_dqrele = (xfs_dqrele_t) fs_noerr,
.xfs_dqattach = (xfs_dqattach_t) fs_noerr,
.xfs_dqdetach = (xfs_dqdetach_t) fs_noerr,
.xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr,
.xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr,
.xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr,
.xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr,
.xfs_dqvopchown = xfs_dqvopchown_default,
.xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr,
};
#endif /* CONFIG_XFS_QUOTA */
/*
* Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -32,6 +32,12 @@
#ifndef __XFS_QUOTA_H__
#define __XFS_QUOTA_H__
/*
* The ondisk form of a dquot structure.
*/
#define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */
#define XFS_DQUOT_VERSION (u_int8_t)0x01 /* latest version number */
/*
* uid_t and gid_t are hard-coded to 32 bits in the inode.
* Hence, an 'id' in a dquot is 32 bits..
......@@ -46,6 +52,100 @@ typedef __int32_t xfs_dqid_t;
typedef __uint64_t xfs_qcnt_t;
typedef __uint16_t xfs_qwarncnt_t;
/*
* This is the main portion of the on-disk representation of quota
* information for a user. This is the q_core of the xfs_dquot_t that
* is kept in kernel memory. We pad this with some more expansion room
* to construct the on disk structure.
*/
typedef struct xfs_disk_dquot {
/*16*/ u_int16_t d_magic; /* dquot magic = XFS_DQUOT_MAGIC */
/*8 */ u_int8_t d_version; /* dquot version */
/*8 */ u_int8_t d_flags; /* XFS_DQ_USER/PROJ/GROUP */
/*32*/ xfs_dqid_t d_id; /* user,project,group id */
/*64*/ xfs_qcnt_t d_blk_hardlimit;/* absolute limit on disk blks */
/*64*/ xfs_qcnt_t d_blk_softlimit;/* preferred limit on disk blks */
/*64*/ xfs_qcnt_t d_ino_hardlimit;/* maximum # allocated inodes */
/*64*/ xfs_qcnt_t d_ino_softlimit;/* preferred inode limit */
/*64*/ xfs_qcnt_t d_bcount; /* disk blocks owned by the user */
/*64*/ xfs_qcnt_t d_icount; /* inodes owned by the user */
/*32*/ __int32_t d_itimer; /* zero if within inode limits if not,
this is when we refuse service */
/*32*/ __int32_t d_btimer; /* similar to above; for disk blocks */
/*16*/ xfs_qwarncnt_t d_iwarns; /* warnings issued wrt num inodes */
/*16*/ xfs_qwarncnt_t d_bwarns; /* warnings issued wrt disk blocks */
/*32*/ __int32_t d_pad0; /* 64 bit align */
/*64*/ xfs_qcnt_t d_rtb_hardlimit;/* absolute limit on realtime blks */
/*64*/ xfs_qcnt_t d_rtb_softlimit;/* preferred limit on RT disk blks */
/*64*/ xfs_qcnt_t d_rtbcount; /* realtime blocks owned */
/*32*/ __int32_t d_rtbtimer; /* similar to above; for RT disk blocks */
/*16*/ xfs_qwarncnt_t d_rtbwarns; /* warnings issued wrt RT disk blocks */
/*16*/ __uint16_t d_pad;
} xfs_disk_dquot_t;
/*
* This is what goes on disk. This is separated from the xfs_disk_dquot because
* carrying the unnecessary padding would be a waste of memory.
*/
typedef struct xfs_dqblk {
xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */
char dd_fill[32]; /* filling for posterity */
} xfs_dqblk_t;
/*
* flags for q_flags field in the dquot.
*/
#define XFS_DQ_USER 0x0001 /* a user quota */
/* #define XFS_DQ_PROJ 0x0002 -- project quota (IRIX) */
#define XFS_DQ_GROUP 0x0004 /* a group quota */
#define XFS_DQ_FLOCKED 0x0008 /* flush lock taken */
#define XFS_DQ_DIRTY 0x0010 /* dquot is dirty */
#define XFS_DQ_WANT 0x0020 /* for lookup/reclaim race */
#define XFS_DQ_INACTIVE 0x0040 /* dq off mplist & hashlist */
#define XFS_DQ_MARKER 0x0080 /* sentinel */
/*
* In the worst case, when both user and group quotas are on,
* we can have a max of three dquots changing in a single transaction.
*/
#define XFS_DQUOT_LOGRES(mp) (sizeof(xfs_disk_dquot_t) * 3)
/*
* These are the structures used to lay out dquots and quotaoff
* records on the log. Quite similar to those of inodes.
*/
/*
* log format struct for dquots.
* The first two fields must be the type and size fitting into
* 32 bits : log_recovery code assumes that.
*/
typedef struct xfs_dq_logformat {
__uint16_t qlf_type; /* dquot log item type */
__uint16_t qlf_size; /* size of this item */
xfs_dqid_t qlf_id; /* usr/grp id number : 32 bits */
__int64_t qlf_blkno; /* blkno of dquot buffer */
__int32_t qlf_len; /* len of dquot buffer */
__uint32_t qlf_boffset; /* off of dquot in buffer */
} xfs_dq_logformat_t;
/*
* log format struct for QUOTAOFF records.
* The first two fields must be the type and size fitting into
* 32 bits : log_recovery code assumes that.
* We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer
* to the first and ensures that the first logitem is taken out of the AIL
* only when the last one is securely committed.
*/
typedef struct xfs_qoff_logformat {
unsigned short qf_type; /* quotaoff log item type */
unsigned short qf_size; /* size of this item */
unsigned int qf_flags; /* USR and/or GRP */
char qf_pad[12]; /* padding for future */
} xfs_qoff_logformat_t;
/*
* Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
*/
......@@ -140,7 +240,7 @@ typedef __uint16_t xfs_qwarncnt_t;
* The inode cannot go inactive as long a reference is kept, and
* therefore if dquot(s) were attached, they'll stay consistent.
* If, for example, the ownership of the inode changes while
* we didnt have the inode locked, the appropriate dquot(s) will be
* we didn't have the inode locked, the appropriate dquot(s) will be
* attached atomically.
*/
#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\
......@@ -164,188 +264,91 @@ typedef __uint16_t xfs_qwarncnt_t;
#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
#ifdef __KERNEL__
#ifdef CONFIG_XFS_QUOTA
/*
* External Interface to the XFS disk quota subsystem.
* The structure kept inside the xfs_trans_t keep track of dquot changes
* within a transaction and apply them later.
*/
struct xfs_disk_dquot;
struct xfs_dqhash;
struct xfs_dquot;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
typedef struct xfs_dqtrx {
struct xfs_dquot *qt_dquot; /* the dquot this refers to */
ulong qt_blk_res; /* blks reserved on a dquot */
ulong qt_blk_res_used; /* blks used from the reservation */
ulong qt_ino_res; /* inode reserved on a dquot */
ulong qt_ino_res_used; /* inodes used from the reservation */
long qt_bcount_delta; /* dquot blk count changes */
long qt_delbcnt_delta; /* delayed dquot blk count changes */
long qt_icount_delta; /* dquot inode count changes */
ulong qt_rtblk_res; /* # blks reserved on a dquot */
ulong qt_rtblk_res_used;/* # blks used from reservation */
long qt_rtbcount_delta;/* dquot realtime blk changes */
long qt_delrtb_delta; /* delayed RT blk count changes */
} xfs_dqtrx_t;
/*
* Quota Manager Interface.
* Dquot transaction functions, used if quota is enabled.
*/
extern struct xfs_qm *xfs_qm_init(void);
extern void xfs_qm_destroy(struct xfs_qm *);
extern int xfs_qm_dqflush_all(struct xfs_mount *, int);
extern int xfs_qm_dqattach(struct xfs_inode *, uint);
extern int xfs_qm_dqpurge_all(struct xfs_mount *, uint);
extern void xfs_qm_mount_quotainit(struct xfs_mount *, uint);
extern void xfs_qm_unmount_quotadestroy(struct xfs_mount *);
extern int xfs_qm_mount_quotas(struct xfs_mount *);
extern int xfs_qm_unmount_quotas(struct xfs_mount *);
extern void xfs_qm_dqdettach_inode(struct xfs_inode *);
extern int xfs_qm_sync(struct xfs_mount *, short);
/*
* Dquot interface.
*/
extern void xfs_dqlock(struct xfs_dquot *);
extern void xfs_dqunlock(struct xfs_dquot *);
extern void xfs_dqunlock_nonotify(struct xfs_dquot *);
extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
extern void xfs_qm_dqput(struct xfs_dquot *);
extern void xfs_qm_dqrele(struct xfs_dquot *);
extern xfs_dqid_t xfs_qm_dqid(struct xfs_dquot *);
extern int xfs_qm_dqget(struct xfs_mount *,
struct xfs_inode *, xfs_dqid_t,
uint, uint, struct xfs_dquot **);
extern int xfs_qm_dqcheck(struct xfs_disk_dquot *,
xfs_dqid_t, uint, uint, char *);
/*
* Vnodeops specific code that should actually be _in_ xfs_vnodeops.c, but
* is here because it's nicer to keep vnodeops (therefore, XFS) lean
* and clean.
*/
extern struct xfs_dquot * xfs_qm_vop_chown(struct xfs_trans *,
struct xfs_inode *,
struct xfs_dquot **,
struct xfs_dquot *);
extern int xfs_qm_vop_dqalloc(struct xfs_mount *,
struct xfs_inode *,
uid_t, gid_t, uint,
struct xfs_dquot **,
struct xfs_dquot **);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *,
struct xfs_inode *,
struct xfs_dquot *,
struct xfs_dquot *,
uint);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
struct xfs_trans *,
struct xfs_inode *,
struct xfs_dquot *,
struct xfs_dquot *);
/*
* Dquot Transaction interface
*/
extern void xfs_trans_alloc_dqinfo(struct xfs_trans *);
extern void xfs_trans_free_dqinfo(struct xfs_trans *);
extern void xfs_trans_dup_dqinfo(struct xfs_trans *,
struct xfs_trans *);
extern void xfs_trans_mod_dquot(struct xfs_trans *,
struct xfs_dquot *,
uint, long);
extern void xfs_trans_mod_dquot_byino(struct xfs_trans *,
struct xfs_inode *,
uint, long);
extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
struct xfs_inode *,
typedef void (*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *);
typedef void (*qo_mod_dquot_byino_t)(struct xfs_trans *,
struct xfs_inode *, uint, long);
typedef void (*qo_free_dqinfo_t)(struct xfs_trans *);
typedef void (*qo_apply_dquot_deltas_t)(struct xfs_trans *);
typedef void (*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *);
typedef int (*qo_reserve_quota_nblks_t)(
struct xfs_trans *, struct xfs_mount *,
struct xfs_inode *, long, long, uint);
typedef int (*qo_reserve_quota_bydquots_t)(
struct xfs_trans *, struct xfs_mount *,
struct xfs_dquot *, struct xfs_dquot *,
long, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_dquot *,
struct xfs_dquot *,
long, long, uint);
extern void xfs_trans_log_dquot(struct xfs_trans *,
struct xfs_dquot *);
extern void xfs_trans_dqjoin(struct xfs_trans *,
struct xfs_dquot *);
extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
# define _XQM_ZONE_DESTROY(z) ((z)? kmem_cache_destroy(z) : (void)0)
#else
# define xfs_qm_init() (NULL)
# define xfs_qm_destroy(xqm) do { } while (0)
# define xfs_qm_dqflush_all(m,t) (ENOSYS)
# define xfs_qm_dqattach(i,t) (ENOSYS)
# define xfs_qm_dqpurge_all(m,t) (ENOSYS)
# define xfs_qm_mount_quotainit(m,t) do { } while (0)
# define xfs_qm_unmount_quotadestroy(m) do { } while (0)
# define xfs_qm_mount_quotas(m) (ENOSYS)
# define xfs_qm_unmount_quotas(m) (ENOSYS)
# define xfs_qm_dqdettach_inode(i) do { } while (0)
# define xfs_qm_sync(m,t) (ENOSYS)
# define xfs_dqlock(d) do { } while (0)
# define xfs_dqunlock(d) do { } while (0)
# define xfs_dqunlock_nonotify(d) do { } while (0)
# define xfs_dqlock2(d1,d2) do { } while (0)
# define xfs_qm_dqput(d) do { } while (0)
# define xfs_qm_dqrele(d) do { } while (0)
# define xfs_qm_dqid(d) (-1)
# define xfs_qm_dqget(m,i,di,t,f,d) (ENOSYS)
# define xfs_qm_dqcheck(dd,di,t,f,s) (ENOSYS)
# define xfs_trans_alloc_dqinfo(t) do { } while (0)
# define xfs_trans_free_dqinfo(t) do { } while (0)
# define xfs_trans_dup_dqinfo(t1,t2) do { } while (0)
# define xfs_trans_mod_dquot(t,d,f,x) do { } while (0)
# define xfs_trans_mod_dquot_byino(t,i,f,x) do { } while (0)
# define xfs_trans_apply_dquot_deltas(t) do { } while (0)
# define xfs_trans_unreserve_and_mod_dquots(t) do { } while (0)
# define xfs_trans_reserve_quota_nblks(t,i,nb,ni,f) (ENOSYS)
# define xfs_trans_reserve_quota_bydquots(t,x,y,b,i,f) (ENOSYS)
# define xfs_trans_log_dquot(t,d) do { } while (0)
# define xfs_trans_dqjoin(t,d) do { } while (0)
# define xfs_qm_dqrele_all_inodes(m,t) do { } while (0)
# define xfs_qm_vop_chown(t,i,d1,d2) (NULL)
# define xfs_qm_vop_dqalloc(m,i,u,g,f,d1,d2) (ENOSYS)
# define xfs_qm_vop_chown_reserve(t,i,d1,d2,f) (ENOSYS)
# define xfs_qm_vop_rename_dqattach(i) (ENOSYS)
# define xfs_qm_vop_dqattach_and_dqmod_newinode(t,i,x,y) do { } while (0)
# define _XQM_ZONE_DESTROY(z) do { } while (0)
#endif /* CONFIG_XFS_QUOTA */
/*
* Regular disk block quota reservations
*/
#define xfs_trans_reserve_blkquota(tp, ip, nblks) \
xfs_trans_reserve_quota_nblks(tp, ip, nblks, 0, XFS_QMOPT_RES_REGBLKS)
#define xfs_trans_reserve_blkquota_force(tp, ip, nblks) \
xfs_trans_reserve_quota_nblks(tp, ip, nblks, 0, \
XFS_QMOPT_RES_REGBLKS|XFS_QMOPT_FORCE_RES)
#define xfs_trans_unreserve_blkquota(tp, ip, nblks) \
(void)xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), 0, XFS_QMOPT_RES_REGBLKS)
#define xfs_trans_reserve_quota(tp, udq, gdq, nb, ni, f) \
xfs_trans_reserve_quota_bydquots(tp, udq, gdq, nb, ni, f|XFS_QMOPT_RES_REGBLKS)
#define xfs_trans_unreserve_quota(tp, ud, gd, b, i, f) \
xfs_trans_reserve_quota_bydquots(tp, ud, gd, -(b), -(i), f|XFS_QMOPT_RES_REGBLKS)
/*
* Realtime disk block quota reservations
*/
#define xfs_trans_reserve_rtblkquota(mp, tp, ip, nblks) \
xfs_trans_reserve_quota_nblks(tp, ip, nblks, 0, XFS_QMOPT_RES_RTBLKS)
#define xfs_trans_unreserve_rtblkquota(tp, ip, nblks) \
(void)xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), 0, XFS_QMOPT_RES_RTBLKS)
#define xfs_trans_reserve_rtquota(mp, tp, uq, pq, blks, f) \
xfs_trans_reserve_quota_bydquots(mp, tp, uq, pq, blks, 0, f|XFS_QMOPT_RES_RTBLKS)
#define xfs_trans_unreserve_rtquota(tp, uq, pq, blks) \
xfs_trans_reserve_quota_bydquots(tp, uq, pq, -(blks), XFS_QMOPT_RES_RTBLKS)
#endif /* __KERNEL__ */
typedef struct xfs_dqtrxops {
qo_dup_dqinfo_t qo_dup_dqinfo;
qo_free_dqinfo_t qo_free_dqinfo;
qo_mod_dquot_byino_t qo_mod_dquot_byino;
qo_apply_dquot_deltas_t qo_apply_dquot_deltas;
qo_reserve_quota_nblks_t qo_reserve_quota_nblks;
qo_reserve_quota_bydquots_t qo_reserve_quota_bydquots;
qo_unreserve_and_mod_dquots_t qo_unreserve_and_mod_dquots;
} xfs_dqtrxops_t;
#define XFS_DQTRXOP(mp, tp, op, args...) \
((mp)->m_qm_ops.xfs_dqtrxops ? \
((mp)->m_qm_ops.xfs_dqtrxops->op)(tp, ## args) : 0)
#define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \
XFS_DQTRXOP(mp, otp, qo_dup_dqinfo, ntp)
#define XFS_TRANS_FREE_DQINFO(mp, tp) \
XFS_DQTRXOP(mp, tp, qo_free_dqinfo)
#define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \
XFS_DQTRXOP(mp, tp, qo_mod_dquot_byino, ip, field, delta)
#define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \
XFS_DQTRXOP(mp, tp, qo_apply_dquot_deltas)
#define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \
XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl)
#define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \
XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl)
#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \
XFS_DQTRXOP(mp, tp, qo_unreserve_and_mod_dquots)
#define XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, nblks) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \
XFS_QMOPT_RES_REGBLKS)
#define XFS_TRANS_RESERVE_BLKQUOTA_FORCE(mp, tp, ip, nblks) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES)
#define XFS_TRANS_UNRESERVE_BLKQUOTA(mp, tp, ip, nblks) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), 0, \
XFS_QMOPT_RES_REGBLKS)
#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)
#define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \
f | XFS_QMOPT_RES_REGBLKS)
extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
extern struct bhv_vfsops xfs_qmops;
extern void xfs_qm_init(void);
extern void xfs_qm_exit(void);
#endif /* __XFS_QUOTA_H__ */
......@@ -261,11 +261,12 @@ xfs_rename(
src_dp = XFS_BHVTOI(src_dir_bdp);
target_dp = XFS_BHVTOI(target_dir_bdp);
mp = src_dp->i_mount;
if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
target_dp, DM_EVENT_RENAME)) {
error = dm_send_namesp_event(DM_EVENT_RENAME,
error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
src_dir_bdp, DM_RIGHT_NULL,
target_dir_bdp, DM_RIGHT_NULL,
src_name, target_name,
......@@ -323,7 +324,6 @@ xfs_rename(
xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
XFS_BMAP_INIT(&free_list, &first_block);
mp = src_dp->i_mount;
tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);
......@@ -343,13 +343,11 @@ xfs_rename(
/*
* Attach the dquots to the inodes
*/
if (XFS_IS_QUOTA_ON(mp)) {
if ((error = xfs_qm_vop_rename_dqattach(inodes))) {
if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
xfs_trans_cancel(tp, cancel_flags);
rename_which_error_return = __LINE__;
goto rele_return;
}
}
/*
* Reacquire the inode locks we dropped above.
......@@ -625,7 +623,7 @@ xfs_rename(
if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_POSTRENAME) ||
DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
target_dp, DM_EVENT_POSTRENAME)) {
(void) dm_send_namesp_event(DM_EVENT_POSTRENAME,
(void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
src_dir_bdp, DM_RIGHT_NULL,
target_dir_bdp, DM_RIGHT_NULL,
src_name, target_name,
......
......@@ -173,11 +173,7 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
/*
* dup the dquot stuff too.
*/
if (tp->t_dqinfo)
xfs_trans_dup_dqinfo(tp, ntp);
XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);
atomic_inc(&tp->t_mountp->m_active_trans);
return ntp;
......@@ -703,9 +699,7 @@ xfs_trans_commit(
* means is that we have some (non-persistent) quota
* reservations that need to be unreserved.
*/
if (tp->t_dqinfo && (tp->t_flags & XFS_TRANS_DQ_DIRTY)) {
xfs_trans_unreserve_and_mod_dquots(tp);
}
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
if (tp->t_ticket) {
commit_lsn = xfs_log_done(mp, tp->t_ticket,
NULL, log_flags);
......@@ -733,9 +727,7 @@ xfs_trans_commit(
if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
xfs_trans_apply_sb_deltas(tp);
}
if (tp->t_flags & XFS_TRANS_DQ_DIRTY) {
xfs_trans_apply_dquot_deltas(tp);
}
XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);
/*
* Ask each log item how many log_vector entries it will
......@@ -955,9 +947,7 @@ xfs_trans_uncommit(
}
xfs_trans_unreserve_and_mod_sb(tp);
if (tp->t_dqinfo && (tp->t_flags & XFS_TRANS_DQ_DIRTY)) {
xfs_trans_unreserve_and_mod_dquots(tp);
}
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
xfs_trans_free_items(tp, flags);
xfs_trans_free_busy(tp);
......@@ -1079,9 +1069,7 @@ xfs_trans_cancel(
}
#endif
xfs_trans_unreserve_and_mod_sb(tp);
if (tp->t_dqinfo && (tp->t_flags & XFS_TRANS_DQ_DIRTY))
xfs_trans_unreserve_and_mod_dquots(tp);
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
if (tp->t_ticket) {
if (flags & XFS_TRANS_RELEASE_LOG_RES) {
......@@ -1110,8 +1098,7 @@ xfs_trans_free(
xfs_trans_t *tp)
{
atomic_dec(&tp->t_mountp->m_active_trans);
if (tp->t_dqinfo)
xfs_trans_free_dqinfo(tp);
XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
kmem_zone_free(xfs_trans_zone, tp);
}
......
......@@ -232,7 +232,7 @@ xfs_dir_ialloc(
xfs_buf_relse(ialloc_context);
if (dqinfo) {
tp->t_dqinfo = dqinfo;
xfs_trans_free_dqinfo(tp);
XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
}
*tpp = ntp;
*ipp = NULL;
......@@ -254,7 +254,7 @@ xfs_dir_ialloc(
*ipp = NULL;
return code;
}
xfs_trans_bjoin (tp, ialloc_context);
xfs_trans_bjoin(tp, ialloc_context);
/*
* Call ialloc again. Since we've locked out all
......
......@@ -68,7 +68,6 @@ xfs_init(void)
spinlock_init(&xfs_dabuf_global_lock, "xfsda");
#endif
mutex_init(&xfs_uuidtabmon, MUTEX_DEFAULT, "xfs_uuidtab");
mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
/*
* Initialize all of the zone allocators we use.
......@@ -175,8 +174,6 @@ xfs_cleanup(void)
kmem_cache_destroy(xfs_ifork_zone);
kmem_cache_destroy(xfs_ili_zone);
kmem_cache_destroy(xfs_chashlist_zone);
_XQM_ZONE_DESTROY(qm_dqzone);
_XQM_ZONE_DESTROY(qm_dqtrxzone);
_ACL_ZONE_DESTROY(xfs_acl_zone);
#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING))
ktrace_uninit();
......@@ -389,6 +386,7 @@ xfs_mount(
cred_t *credp)
{
struct vfs *vfsp = bhvtovfs(bhvp);
struct bhv_desc *p;
struct xfs_mount *mp = XFS_BHVTOM(bhvp);
struct block_device *ddev, *logdev, *rtdev;
int ronly = (vfsp->vfs_flag & VFS_RDONLY);
......@@ -421,28 +419,43 @@ xfs_mount(
}
}
mp->m_io_ops = xfs_iocore_xfs;
/*
* Setup xfs_mount function vectors from available behaviors
*/
p = vfs_bhv_lookup(vfsp, VFS_POSITION_DM);
mp->m_dm_ops = p ? *(xfs_dmops_t *) vfs_bhv_custom(p) : xfs_dmcore_xfs;
p = vfs_bhv_lookup(vfsp, VFS_POSITION_QM);
mp->m_qm_ops = p ? *(xfs_qmops_t *) vfs_bhv_custom(p) : xfs_qmcore_xfs;
p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO);
mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs;
/*
* Setup xfs_mount buffer target pointers
*/
mp->m_ddev_targp = xfs_alloc_buftarg(ddev);
if (rtdev)
mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev);
mp->m_logdev_targp = (logdev && logdev != ddev) ?
xfs_alloc_buftarg(logdev) : mp->m_ddev_targp;
/*
* Setup flags based on mount(2) options and then the superblock
*/
error = xfs_start_flags(args, mp, ronly);
if (error)
goto error;
error = xfs_readsb(mp);
if (error)
goto error;
error = xfs_finish_flags(args, mp, ronly);
if (error) {
xfs_freesb(mp);
goto error;
}
/*
* Setup xfs_mount buffer target pointers based on superblock
*/
xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
mp->m_sb.sb_sectsize);
if (logdev && logdev != ddev) {
......@@ -531,16 +544,15 @@ xfs_unmount(
int flags,
cred_t *credp)
{
xfs_mount_t *mp;
xfs_inode_t *rip;
vnode_t *rvp = 0;
struct vfs *vfsp = bhvtovfs(bdp);
xfs_mount_t *mp = XFS_BHVTOM(bdp);
xfs_inode_t *rip;
vnode_t *rvp;
int unmount_event_wanted = 0;
int unmount_event_flags = 0;
int xfs_unmountfs_needed = 0;
int error;
mp = XFS_BHVTOM(bdp);
rip = mp->m_rootip;
rvp = XFS_ITOV(rip);
......@@ -548,7 +560,7 @@ xfs_unmount(
bhv_desc_t *rbdp;
rbdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(rvp), &xfs_vnodeops);
error = dm_send_namesp_event(DM_EVENT_PREUNMOUNT,
error = XFS_SEND_NAMESP(mp, DM_EVENT_PREUNMOUNT,
rbdp, DM_RIGHT_NULL, rbdp, DM_RIGHT_NULL,
NULL, NULL, 0, 0,
(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
......@@ -601,9 +613,9 @@ xfs_unmount(
*/
if (unmount_event_wanted) {
/* Note: mp structure must still exist for
* dm_send_unmount_event() call.
* XFS_SEND_UNMOUNT() call.
*/
dm_send_unmount_event(vfsp, error == 0 ? rvp : NULL,
XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL,
DM_RIGHT_NULL, 0, error, unmount_event_flags);
}
if (xfs_unmountfs_needed) {
......@@ -679,7 +691,7 @@ xfs_unmount_flush(
* Release dquot that rootinode, rbmino and rsumino might be holding,
* flush and purge the quota inodes.
*/
error = xfs_qm_unmount_quotas(mp);
error = XFS_QM_UNMOUNT(mp);
if (error == EFSCORRUPTED)
goto fscorrupt_out2;
......
......@@ -271,10 +271,8 @@ xfs_setattr(
int timeflags = 0;
vnode_t *vp;
xfs_prid_t projid=0, iprojid=0;
int privileged;
int mandlock_before, mandlock_after;
uint qflags;
xfs_dquot_t *udqp, *gdqp, *olddquot1, *olddquot2;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int file_owner;
vp = BHV_TO_VNODE(bdp);
......@@ -319,7 +317,8 @@ xfs_setattr(
* because the i_*dquot fields will get updated anyway.
*/
if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) {
qflags = 0;
uint qflags = 0;
if (mask & XFS_AT_UID) {
uid = vap->va_uid;
qflags |= XFS_QMOPT_UQUOTA;
......@@ -339,8 +338,8 @@ xfs_setattr(
*/
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
if ((code = xfs_qm_vop_dqalloc(mp, ip, uid, gid, qflags,
&udqp, &gdqp)))
code = XFS_QM_DQVOPALLOC(mp, ip, uid,gid, qflags, &udqp, &gdqp);
if (code)
return (code);
}
......@@ -365,7 +364,7 @@ xfs_setattr(
} else {
if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) &&
!(flags & ATTR_DMI)) {
code = xfs_dm_send_data_event (DM_EVENT_TRUNCATE, bdp,
code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, bdp,
vap->va_size, 0, AT_DELAY_FLAG(flags), NULL);
if (code) {
lock_flags = 0;
......@@ -482,15 +481,10 @@ xfs_setattr(
if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
(XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
ASSERT(tp);
/*
* XXX:casey - This may result in unnecessary auditing.
*/
privileged = capable(CAP_FOWNER);
if ((code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
privileged ?
XFS_QMOPT_FORCE_RES :
0)))
/* out of quota */
code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
goto error_return;
}
}
......@@ -520,11 +514,9 @@ xfs_setattr(
/*
* Make sure that the dquots are attached to the inode.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((code = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED)))
if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED)))
goto error_return;
}
}
/*
* Change file access or modified times.
......@@ -730,13 +722,8 @@ xfs_setattr(
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(mask & XFS_AT_UID);
ASSERT(udqp);
ASSERT(xfs_qm_dqid(udqp) == (xfs_dqid_t)uid);
olddquot1 = xfs_qm_vop_chown(tp, ip,
&ip->i_udquot,
udqp);
/*
* We'll dqrele olddquot at the end.
*/
olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
&ip->i_udquot, udqp);
}
ip->i_d.di_uid = uid;
}
......@@ -744,10 +731,8 @@ xfs_setattr(
if (XFS_IS_GQUOTA_ON(mp)) {
ASSERT(mask & XFS_AT_GID);
ASSERT(gdqp);
ASSERT(xfs_qm_dqid(gdqp) == gid);
olddquot2 = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot,
gdqp);
olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
&ip->i_gdquot, gdqp);
}
ip->i_d.di_gid = gid;
}
......@@ -802,9 +787,6 @@ xfs_setattr(
ip->i_d.di_flags = 0;
if (vap->va_xflags & XFS_XFLAG_REALTIME) {
ip->i_d.di_flags |= XFS_DIFLAG_REALTIME;
/* This is replicated in the io core for
* CXFS use
*/
ip->i_iocore.io_flags |= XFS_IOCORE_RT;
}
/* can't set PREALLOC this way, just ignore it */
......@@ -866,16 +848,12 @@ xfs_setattr(
xfs_iunlock(ip, lock_flags);
/*
* release any dquot(s) inode had kept before chown
* Release any dquot(s) the inode had kept before chown.
*/
if (olddquot1)
xfs_qm_dqrele(olddquot1);
if (olddquot2)
xfs_qm_dqrele(olddquot2);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, olddquot1);
XFS_QM_DQRELE(mp, olddquot2);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
if (code) {
return code;
......@@ -883,7 +861,7 @@ xfs_setattr(
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_ATTRIBUTE) &&
!(flags & ATTR_DMI)) {
(void) dm_send_namesp_event (DM_EVENT_ATTRIBUTE, bdp, DM_RIGHT_NULL,
(void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, bdp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL, NULL, NULL,
0, 0, AT_DELAY_FLAG(flags));
}
......@@ -893,10 +871,8 @@ xfs_setattr(
commit_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
error_return:
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
if (tp) {
xfs_trans_cancel(tp, commit_flags);
}
......@@ -1286,14 +1262,8 @@ xfs_inactive_free_eofblocks(
/*
* Attach the dquots to the inode up front.
*/
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0)))
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return (error);
}
}
/*
* There are blocks after the end of file.
......@@ -1683,7 +1653,7 @@ xfs_inactive(
if (ip->i_d.di_nlink == 0 &&
DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_DESTROY)) {
(void) dm_send_destroy_event(bdp, DM_RIGHT_NULL);
(void) XFS_SEND_DESTROY(mp, bdp, DM_RIGHT_NULL);
}
error = 0;
......@@ -1709,14 +1679,9 @@ xfs_inactive(
ASSERT(ip->i_d.di_nlink == 0);
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0)))
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return (VN_INACTIVE_CACHE);
}
}
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
if (truncate) {
/*
......@@ -1826,20 +1791,18 @@ xfs_inactive(
* might do that, we need to make sure. Otherwise the
* inode might be lost for a long time or forever.
*/
if (!XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
if (!XFS_FORCED_SHUTDOWN(mp)) {
cmn_err(CE_NOTE,
"xfs_inactive: xfs_ifree() returned an error = %d on %s",
error,tp->t_mountp->m_fsname);
xfs_force_shutdown(tp->t_mountp, XFS_METADATA_IO_ERROR);
error, mp->m_fsname);
xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
}
xfs_trans_cancel(tp, commit_flags | XFS_TRANS_ABORT);
} else {
/*
* Credit the quota account(s). The inode is gone.
*/
if (XFS_IS_QUOTA_ON(tp->t_mountp))
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT,
-1);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
/*
* Just ignore errors at this point. There is
......@@ -1850,8 +1813,7 @@ xfs_inactive(
/*
* Release the dquots held by inode, if any.
*/
if (ip->i_udquot || ip->i_gdquot)
xfs_qm_dqdettach_inode(ip);
XFS_QM_DQDETACH(mp, ip);
xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
......@@ -1925,7 +1887,7 @@ xfs_create(
uint cancel_flags;
int committed;
xfs_prid_t prid;
xfs_dquot_t *udqp, *gdqp;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
int dm_di_mode;
int namelen;
......@@ -1935,22 +1897,22 @@ xfs_create(
vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
dp = XFS_BHVTOI(dir_bdp);
mp = dp->i_mount;
dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);
namelen = VNAMELEN(dentry);
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
error = dm_send_namesp_event(DM_EVENT_CREATE,
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
dir_bdp, DM_RIGHT_NULL, NULL,
DM_RIGHT_NULL, name, NULL,
dm_di_mode, 0, 0);
if (error)
return error;
dm_event_sent = 1;
}
mp = dp->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
......@@ -1965,14 +1927,10 @@ xfs_create(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
if (XFS_IS_QUOTA_ON(mp)) {
error = xfs_qm_vop_dqalloc(mp, dp,
current->fsuid, current->fsgid,
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT,
&udqp, &gdqp);
error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid,
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
}
ip = NULL;
dp_joined_to_trans = B_FALSE;
......@@ -2008,13 +1966,10 @@ xfs_create(
/*
* Reserve disk quota and the inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_quota(tp, udqp, gdqp, resblks,
1, 0)) {
error = EDQUOT;
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
if (error)
goto error_return;
}
}
if (resblks == 0 &&
(error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
goto error_return;
......@@ -2074,9 +2029,7 @@ xfs_create(
* These ids of the inode couldn't have changed since the new
* inode has been locked ever since it was created.
*/
if (XFS_IS_QUOTA_ON(mp))
xfs_qm_vop_dqattach_and_dqmod_newinode(tp, ip, udqp,
gdqp);
XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
/*
* xfs_trans_commit normally decrements the vnode ref count
......@@ -2099,10 +2052,8 @@ xfs_create(
goto error_return;
}
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
/*
* Propogate the fact that the vnode changed after the
......@@ -2118,7 +2069,7 @@ xfs_create(
if ( (*vpp || (error != 0 && dm_event_sent != 0)) &&
DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
DM_EVENT_POSTCREATE)) {
(void) dm_send_namesp_event(DM_EVENT_POSTCREATE,
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
dir_bdp, DM_RIGHT_NULL,
*vpp ? vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops):NULL,
DM_RIGHT_NULL, name, NULL,
......@@ -2136,10 +2087,8 @@ xfs_create(
if (!dp_joined_to_trans && (dp != NULL))
xfs_iunlock(dp, XFS_ILOCK_EXCL);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
goto std_return;
......@@ -2153,10 +2102,8 @@ xfs_create(
xfs_trans_cancel(tp, cancel_flags);
IRELE(ip);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
goto std_return;
}
......@@ -2437,8 +2384,8 @@ xfs_remove(
namelen = VNAMELEN(dentry);
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
error = dm_send_namesp_event(DM_EVENT_REMOVE, dir_bdp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_bdp,
DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
name, NULL, 0, 0, 0);
if (error)
return error;
......@@ -2471,18 +2418,14 @@ xfs_remove(
ITRACE(ip);
if (XFS_IS_QUOTA_ON(mp)) {
ASSERT(! error);
if (XFS_NOT_DQATTACHED(mp, dp))
error = xfs_qm_dqattach(dp, 0);
if (!error && dp != ip && XFS_NOT_DQATTACHED(mp, ip))
error = xfs_qm_dqattach(ip, 0);
error = XFS_QM_DQATTACH(mp, dp, 0);
if (!error && dp != ip)
error = XFS_QM_DQATTACH(mp, ip, 0);
if (error) {
REMOVE_DEBUG_TRACE(__LINE__);
IRELE(ip);
goto std_return;
}
}
tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
......@@ -2606,7 +2549,7 @@ xfs_remove(
std_return:
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp,
DM_EVENT_POSTREMOVE)) {
(void) dm_send_namesp_event(DM_EVENT_POSTREMOVE,
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
dir_bdp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
name, NULL, dm_di_mode, error, 0);
......@@ -2690,7 +2633,7 @@ xfs_link(
return XFS_ERROR(EIO);
if (DM_EVENT_ENABLED(src_vp->v_vfsp, tdp, DM_EVENT_LINK)) {
error = dm_send_namesp_event(DM_EVENT_LINK,
error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
target_dir_bdp, DM_RIGHT_NULL,
src_bdp, DM_RIGHT_NULL,
target_name, NULL, 0, 0, 0);
......@@ -2700,15 +2643,11 @@ xfs_link(
/* Return through std_return after this point. */
if (XFS_IS_QUOTA_ON(mp)) {
error = 0;
if (XFS_NOT_DQATTACHED(mp, sip))
error = xfs_qm_dqattach(sip, 0);
if (!error && sip != tdp && XFS_NOT_DQATTACHED(mp, tdp))
error = xfs_qm_dqattach(tdp, 0);
error = XFS_QM_DQATTACH(mp, sip, 0);
if (!error && sip != tdp)
error = XFS_QM_DQATTACH(mp, tdp, 0);
if (error)
goto std_return;
}
tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
......@@ -2798,7 +2737,7 @@ xfs_link(
std_return:
if (DM_EVENT_ENABLED(src_vp->v_vfsp, sip,
DM_EVENT_POSTLINK)) {
(void) dm_send_namesp_event(DM_EVENT_POSTLINK,
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
target_dir_bdp, DM_RIGHT_NULL,
src_bdp, DM_RIGHT_NULL,
target_name, NULL, 0, error, 0);
......@@ -2813,8 +2752,6 @@ xfs_link(
goto std_return;
}
/*
* xfs_mkdir
*
......@@ -2844,7 +2781,7 @@ xfs_mkdir(
boolean_t created = B_FALSE;
int dm_event_sent = 0;
xfs_prid_t prid;
xfs_dquot_t *udqp, *gdqp;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
int dm_di_mode;
int dir_namelen;
......@@ -2863,7 +2800,7 @@ xfs_mkdir(
dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
error = dm_send_namesp_event(DM_EVENT_CREATE,
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
dir_bdp, DM_RIGHT_NULL, NULL,
DM_RIGHT_NULL, dir_name, NULL,
dm_di_mode, 0, 0);
......@@ -2886,14 +2823,10 @@ xfs_mkdir(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
if (XFS_IS_QUOTA_ON(mp)) {
error = xfs_qm_vop_dqalloc(mp, dp,
current->fsuid, current->fsgid,
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT,
&udqp, &gdqp);
error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
}
tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
......@@ -2925,12 +2858,9 @@ xfs_mkdir(
/*
* Reserve disk quota and the inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_quota(tp, udqp, gdqp, resblks, 1, 0)) {
error = XFS_ERROR(EDQUOT);
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
if (error)
goto error_return;
}
}
if (resblks == 0 &&
(error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen)))
......@@ -2999,9 +2929,7 @@ xfs_mkdir(
/*
* Attach the dquots to the new inode and modify the icount incore.
*/
if (XFS_IS_QUOTA_ON(mp)) {
xfs_qm_vop_dqattach_and_dqmod_newinode(tp, cdp, udqp, gdqp);
}
XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp);
/*
* If this is a synchronous mount, make sure that the
......@@ -3019,11 +2947,8 @@ xfs_mkdir(
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
if (error) {
IRELE(cdp);
}
......@@ -3035,7 +2960,7 @@ xfs_mkdir(
if ( (created || (error != 0 && dm_event_sent != 0)) &&
DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
DM_EVENT_POSTCREATE)) {
(void) dm_send_namesp_event(DM_EVENT_POSTCREATE,
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
dir_bdp, DM_RIGHT_NULL,
created ? XFS_ITOBHV(cdp):NULL,
DM_RIGHT_NULL,
......@@ -3051,11 +2976,8 @@ xfs_mkdir(
cancel_flags |= XFS_TRANS_ABORT;
error_return:
xfs_trans_cancel(tp, cancel_flags);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
if (!dp_joined_to_trans && (dp != NULL)) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
......@@ -3093,6 +3015,7 @@ xfs_rmdir(
dir_vp = BHV_TO_VNODE(dir_bdp);
dp = XFS_BHVTOI(dir_bdp);
mp = dp->i_mount;
vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
......@@ -3101,7 +3024,7 @@ xfs_rmdir(
namelen = VNAMELEN(dentry);
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
error = dm_send_namesp_event(DM_EVENT_REMOVE,
error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE,
dir_bdp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
name, NULL, 0, 0, 0);
......@@ -3136,18 +3059,14 @@ xfs_rmdir(
/*
* Get the dquots for the inodes.
*/
if (XFS_IS_QUOTA_ON(mp)) {
ASSERT(! error);
if (XFS_NOT_DQATTACHED(mp, dp))
error = xfs_qm_dqattach(dp, 0);
if (!error && dp != cdp && XFS_NOT_DQATTACHED(mp, cdp))
error = xfs_qm_dqattach(cdp, 0);
error = XFS_QM_DQATTACH(mp, dp, 0);
if (!error && dp != cdp)
error = XFS_QM_DQATTACH(mp, cdp, 0);
if (error) {
IRELE(cdp);
REMOVE_DEBUG_TRACE(__LINE__);
goto std_return;
}
}
tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
......@@ -3298,9 +3217,8 @@ xfs_rmdir(
/* Fall through to std_return with error = 0 or the errno
* from xfs_trans_commit. */
std_return:
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp,
DM_EVENT_POSTREMOVE)) {
(void) dm_send_namesp_event(DM_EVENT_POSTREMOVE,
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
dir_bdp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
name, NULL, dm_di_mode,
......@@ -3391,7 +3309,7 @@ xfs_symlink(
int n;
xfs_buf_t *bp;
xfs_prid_t prid;
xfs_dquot_t *udqp, *gdqp;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
char *link_name = VNAME(dentry);
int link_namelen;
......@@ -3446,10 +3364,9 @@ xfs_symlink(
}
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_SYMLINK)) {
error = dm_send_namesp_event(DM_EVENT_SYMLINK, dir_bdp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
link_name, target_path,
0, 0, 0);
error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_bdp,
DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
link_name, target_path, 0, 0, 0);
if (error)
return error;
}
......@@ -3465,14 +3382,10 @@ xfs_symlink(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
if (XFS_IS_QUOTA_ON(mp)) {
error = xfs_qm_vop_dqalloc(mp, dp,
current->fsuid, current->fsgid,
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT,
&udqp, &gdqp);
error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
}
tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
......@@ -3503,12 +3416,9 @@ xfs_symlink(
/*
* Reserve disk quota : blocks and inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_quota(tp, udqp, gdqp, resblks, 1, 0)) {
error = XFS_ERROR(EDQUOT);
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
if (error)
goto error_return;
}
}
/*
* Check for ability to enter directory entry, if no space reserved.
......@@ -3543,9 +3453,7 @@ xfs_symlink(
/*
* Also attach the dquot(s) to it, if applicable.
*/
if (XFS_IS_QUOTA_ON(mp)) {
xfs_qm_vop_dqattach_and_dqmod_newinode(tp, ip, udqp, gdqp);
}
XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
if (resblks)
resblks -= XFS_IALLOC_SPACE_RES(mp);
......@@ -3641,21 +3549,18 @@ xfs_symlink(
goto error2;
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
/* Fall through to std_return with error = 0 or errno from
* xfs_trans_commit */
std_return:
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
DM_EVENT_POSTSYMLINK)) {
(void) dm_send_namesp_event(DM_EVENT_POSTSYMLINK,
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
dir_bdp, DM_RIGHT_NULL,
error? NULL:XFS_ITOBHV(ip),
DM_RIGHT_NULL,
link_name, target_path,
error ? NULL : XFS_ITOBHV(ip),
DM_RIGHT_NULL, link_name, target_path,
0, error, 0);
}
......@@ -3675,10 +3580,8 @@ xfs_symlink(
cancel_flags |= XFS_TRANS_ABORT;
error_return:
xfs_trans_cancel(tp, cancel_flags);
if (udqp)
xfs_qm_dqrele(udqp);
if (gdqp)
xfs_qm_dqrele(gdqp);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
if (!dp_joined_to_trans && (dp != NULL)) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
......@@ -4165,7 +4068,7 @@ xfs_alloc_file_space(
/*
* determine if this is a realtime file
*/
if ((rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) != 0) {
if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) {
if (ip->i_d.di_extsize)
rtextsize = ip->i_d.di_extsize;
else
......@@ -4173,12 +4076,8 @@ xfs_alloc_file_space(
} else
rtextsize = 0;
if (XFS_IS_QUOTA_ON(mp)) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0)))
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return error;
}
}
if (len <= 0)
return XFS_ERROR(EINVAL);
......@@ -4200,7 +4099,7 @@ xfs_alloc_file_space(
end_dmi_offset = offset+len;
if (end_dmi_offset > ip->i_d.di_size)
end_dmi_offset = ip->i_d.di_size;
error = xfs_dm_send_data_event(DM_EVENT_WRITE, XFS_ITOBHV(ip),
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip),
offset, end_dmi_offset - offset,
0, NULL);
if (error)
......@@ -4255,15 +4154,11 @@ xfs_alloc_file_space(
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_quota(tp,
ip->i_udquot,
ip->i_gdquot,
resblks, 0, 0)) {
error = XFS_ERROR(EDQUOT);
error = XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp,
ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;
}
}
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
......@@ -4308,13 +4203,13 @@ xfs_alloc_file_space(
if (error == ENOSPC && (attr_flags&ATTR_DMI) == 0 &&
DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_NOSPACE)) {
error = dm_send_namesp_event(DM_EVENT_NOSPACE,
error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
XFS_ITOBHV(ip), DM_RIGHT_NULL,
XFS_ITOBHV(ip), DM_RIGHT_NULL,
NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
if (error == 0)
goto retry; /* Maybe DMAPI app. has made space */
/* else fall through with error from xfs_dm_send_data_event */
/* else fall through with error from XFS_SEND_DATA */
}
return error;
......@@ -4434,36 +4329,30 @@ xfs_free_file_space(
int nimap;
uint resblks;
int rounding;
int specrt;
int rt;
xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp;
vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
mp = ip->i_mount;
if (XFS_IS_QUOTA_ON(mp)) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0)))
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return error;
}
}
error = 0;
if (len <= 0) /* if nothing being freed */
return error;
specrt =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb);
rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
end_dmi_offset = offset + len;
endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
if (offset < ip->i_d.di_size &&
(attr_flags&ATTR_DMI) == 0 &&
(attr_flags & ATTR_DMI) == 0 &&
DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
if (end_dmi_offset > ip->i_d.di_size)
end_dmi_offset = ip->i_d.di_size;
error = xfs_dm_send_data_event(DM_EVENT_WRITE, XFS_ITOBHV(ip),
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip),
offset, end_dmi_offset - offset,
AT_DELAY_FLAG(attr_flags), NULL);
if (error)
......@@ -4480,11 +4369,11 @@ xfs_free_file_space(
xfs_inval_cached_pages(XFS_ITOV(ip), &(ip->i_iocore), ioffset, 0, 0);
/*
* Need to zero the stuff we're not freeing, on disk.
* If its specrt (realtime & can't use unwritten extents) then
* we actually need to zero the extent edges. Otherwise xfs_bunmapi
* If its a realtime file & can't use unwritten extents then we
* actually need to zero the extent edges. Otherwise xfs_bunmapi
* will take care of it for us.
*/
if (specrt) {
if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
nimap = 1;
error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0,
&imap, &nimap, NULL);
......@@ -4561,15 +4450,11 @@ xfs_free_file_space(
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_quota(tp,
ip->i_udquot,
ip->i_gdquot,
resblks, 0, 0)) {
error = XFS_ERROR(EDQUOT);
error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;
}
}
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
......
......@@ -31,8 +31,8 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include <xfs_log_recover.h>
#include "quota/xfs_qm.h"
#include "pagebuf/page_buf_internal.h"
#include <linux/ctype.h>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment