Commit 384263dc authored by Len Brown

Merge intel.com:/home/lenb/bk/26-latest-ref
into intel.com:/home/lenb/src/26-latest-dev
parents afe244f6 002984e2
......@@ -410,9 +410,9 @@ static void mxser_start(struct tty_struct *);
static void mxser_hangup(struct tty_struct *);
static void mxser_rs_break(struct tty_struct *, int);
static irqreturn_t mxser_interrupt(int, void *, struct pt_regs *);
static inline void mxser_receive_chars(struct mxser_struct *, int *);
static inline void mxser_transmit_chars(struct mxser_struct *);
static inline void mxser_check_modem_status(struct mxser_struct *, int);
static void mxser_receive_chars(struct mxser_struct *, int *);
static void mxser_transmit_chars(struct mxser_struct *);
static void mxser_check_modem_status(struct mxser_struct *, int);
static int mxser_block_til_ready(struct tty_struct *, struct file *, struct mxser_struct *);
static int mxser_startup(struct mxser_struct *);
static void mxser_shutdown(struct mxser_struct *);
......@@ -1989,7 +1989,7 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return handled;
}
static inline void mxser_receive_chars(struct mxser_struct *info, int *status)
static void mxser_receive_chars(struct mxser_struct *info, int *status)
{
struct tty_struct *tty = info->tty;
unsigned char ch, gdl;
......@@ -2143,7 +2143,7 @@ static inline void mxser_receive_chars(struct mxser_struct *info, int *status)
}
static inline void mxser_transmit_chars(struct mxser_struct *info)
static void mxser_transmit_chars(struct mxser_struct *info)
{
int count, cnt;
unsigned long flags;
......@@ -2206,7 +2206,7 @@ static inline void mxser_transmit_chars(struct mxser_struct *info)
spin_unlock_irqrestore(&info->slock, flags);
}
static inline void mxser_check_modem_status(struct mxser_struct *info, int status)
static void mxser_check_modem_status(struct mxser_struct *info, int status)
{
/* update input line counters */
if (status & UART_MSR_TERI)
......
......@@ -35,6 +35,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
#include "time.h"
#include "kmem.h"
......@@ -46,7 +47,8 @@
void *
kmem_alloc(size_t size, int flags)
{
int retries = 0, lflags = kmem_flags_convert(flags);
int retries = 0;
int lflags = kmem_flags_convert(flags);
void *ptr;
do {
......@@ -57,8 +59,10 @@ kmem_alloc(size_t size, int flags)
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
printk(KERN_ERR "possible deadlock in %s (mode:0x%x)\n",
printk(KERN_ERR "XFS: possible memory allocation "
"deadlock in %s (mode:0x%x)\n",
__FUNCTION__, lflags);
blk_congestion_wait(WRITE, HZ/50);
} while (1);
}
......@@ -102,7 +106,8 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
void *
kmem_zone_alloc(kmem_zone_t *zone, int flags)
{
int retries = 0, lflags = kmem_flags_convert(flags);
int retries = 0;
int lflags = kmem_flags_convert(flags);
void *ptr;
do {
......@@ -110,8 +115,10 @@ kmem_zone_alloc(kmem_zone_t *zone, int flags)
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
printk(KERN_ERR "possible deadlock in %s (mode:0x%x)\n",
printk(KERN_ERR "XFS: possible memory allocation "
"deadlock in %s (mode:0x%x)\n",
__FUNCTION__, lflags);
blk_congestion_wait(WRITE, HZ/50);
} while (1);
}
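
For context, a minimal sketch of the calling convention these retry loops imply (the caller functions below are hypothetical; only kmem_alloc()/kmem_free() and the KM_* flags come from the hunks above). KM_SLEEP callers may rely on the loop never returning NULL, while KM_MAYFAIL/KM_NOSLEEP callers take the early-return path and must handle failure; blk_congestion_wait() throttles retries against block-device writeback instead of a fixed sleep:

/* Hypothetical callers -- a sketch, not part of this patch. */
static void *example_must_succeed(size_t size)
{
	/*
	 * KM_SLEEP: kmem_alloc() retries (with blk_congestion_wait)
	 * until the allocation succeeds, so NULL is never returned.
	 */
	return kmem_alloc(size, KM_SLEEP);
}

static int example_may_fail(size_t size)
{
	/*
	 * KM_MAYFAIL: the loop returns early on failure, so the
	 * caller must handle NULL (XFS uses positive errnos internally).
	 */
	void *buf = kmem_alloc(size, KM_MAYFAIL);

	if (!buf)
		return ENOMEM;
	kmem_free(buf, size);
	return 0;
}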
......
......@@ -83,7 +83,7 @@ typedef unsigned long xfs_pflags_t;
static __inline unsigned int kmem_flags_convert(int flags)
{
int lflags;
int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
#ifdef DEBUG
if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
......
......@@ -53,13 +53,10 @@
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include "xfs_linux.h"
#ifndef GFP_READAHEAD
#define GFP_READAHEAD (__GFP_NOWARN|__GFP_NORETRY)
#endif
/*
* File wide globals
*/
......@@ -118,8 +115,8 @@ ktrace_t *pagebuf_trace_buf;
*/
#define pb_to_gfp(flags) \
(((flags) & PBF_READ_AHEAD) ? GFP_READAHEAD : \
((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL)
((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
#define pb_to_km(flags) \
(((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
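
Expanded case by case, the reworked pb_to_gfp() now tags every pagebuf allocation with __GFP_NOWARN (XFS prints its own diagnostics, as the printk hunks here show), and read-ahead additionally gets __GFP_NORETRY so speculative I/O fails fast instead of pressuring the allocator. An illustrative expansion, using only the flags above:

/*
 * Illustrative expansion of pb_to_gfp() -- not part of this patch:
 *
 *   PBF_READ_AHEAD -> __GFP_NORETRY | __GFP_NOWARN
 *                     (speculative read: give up early, stay quiet)
 *   PBF_DONT_BLOCK -> GFP_NOFS | __GFP_NOWARN
 *                     (no fs recursion while fs locks are held)
 *   otherwise      -> GFP_KERNEL | __GFP_NOWARN
 */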
......@@ -387,13 +384,13 @@ _pagebuf_lookup_pages(
*/
if (!(++retries % 100))
printk(KERN_ERR
"possible deadlock in %s (mode:0x%x)\n",
"XFS: possible memory allocation "
"deadlock in %s (mode:0x%x)\n",
__FUNCTION__, gfp_mask);
XFS_STATS_INC(pb_page_retries);
pagebuf_daemon_wakeup(0, gfp_mask);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(10);
blk_congestion_wait(WRITE, HZ/50);
goto retry;
}
......@@ -486,7 +483,7 @@ _pagebuf_map_pages(
* which may imply that this call will block until those buffers
* are unlocked. No I/O is implied by this call.
*/
STATIC xfs_buf_t *
xfs_buf_t *
_pagebuf_find( /* find buffer for block */
xfs_buftarg_t *target,/* target for block */
loff_t ioff, /* starting offset of range */
......@@ -578,39 +575,14 @@ _pagebuf_find( /* find buffer for block */
return (pb);
}
/*
* pagebuf_find
* xfs_buf_get_flags assembles a buffer covering the specified range.
*
* pagebuf_find returns a buffer matching the specified range of
* data for the specified target, if any of the relevant blocks
* are in memory. The buffer may have unallocated holes, if
* some, but not all, of the blocks are in memory. Even where
* pages are present in the buffer, not all of every page may be
* valid.
* Storage in memory for all portions of the buffer will be allocated,
* although backing storage may not be.
*/
xfs_buf_t *
pagebuf_find( /* find buffer for block */
/* if the block is in memory */
xfs_buftarg_t *target,/* target for block */
loff_t ioff, /* starting offset of range */
size_t isize, /* length of range */
page_buf_flags_t flags) /* PBF_TRYLOCK */
{
return _pagebuf_find(target, ioff, isize, flags, NULL);
}
/*
* pagebuf_get
*
* pagebuf_get assembles a buffer covering the specified range.
* Some or all of the blocks in the range may be valid. Storage
* in memory for all portions of the buffer will be allocated,
* although backing storage may not be. If PBF_READ is set in
* flags, pagebuf_iostart is called also.
*/
xfs_buf_t *
pagebuf_get( /* allocate a buffer */
xfs_buf_get_flags( /* allocate a buffer */
xfs_buftarg_t *target,/* target for buffer */
loff_t ioff, /* starting offset of range */
size_t isize, /* length of range */
......@@ -640,8 +612,8 @@ pagebuf_get( /* allocate a buffer */
if (!(pb->pb_flags & PBF_MAPPED)) {
error = _pagebuf_map_pages(pb, flags);
if (unlikely(error)) {
printk(KERN_WARNING
"pagebuf_get: failed to map pages\n");
printk(KERN_WARNING "%s: failed to map pages\n",
__FUNCTION__);
goto no_buffer;
}
}
......@@ -655,30 +627,50 @@ pagebuf_get( /* allocate a buffer */
pb->pb_bn = ioff;
pb->pb_count_desired = pb->pb_buffer_length;
if (flags & PBF_READ) {
PB_TRACE(pb, "get", (unsigned long)flags);
return pb;
no_buffer:
if (flags & (PBF_LOCK | PBF_TRYLOCK))
pagebuf_unlock(pb);
pagebuf_rele(pb);
return NULL;
}
xfs_buf_t *
xfs_buf_read_flags(
xfs_buftarg_t *target,
loff_t ioff,
size_t isize,
page_buf_flags_t flags)
{
xfs_buf_t *pb;
flags |= PBF_READ;
pb = xfs_buf_get_flags(target, ioff, isize, flags);
if (pb) {
if (PBF_NOT_DONE(pb)) {
PB_TRACE(pb, "get_read", (unsigned long)flags);
PB_TRACE(pb, "read", (unsigned long)flags);
XFS_STATS_INC(pb_get_read);
pagebuf_iostart(pb, flags);
} else if (flags & PBF_ASYNC) {
PB_TRACE(pb, "get_read_async", (unsigned long)flags);
PB_TRACE(pb, "read_async", (unsigned long)flags);
/*
* Read ahead call which is already satisfied,
* drop the buffer
*/
goto no_buffer;
} else {
PB_TRACE(pb, "get_read_done", (unsigned long)flags);
PB_TRACE(pb, "read_done", (unsigned long)flags);
/* We do not want read in the flags */
pb->pb_flags &= ~PBF_READ;
}
} else {
PB_TRACE(pb, "get_write", (unsigned long)flags);
}
return pb;
no_buffer:
if (flags & (PBF_LOCK | PBF_TRYLOCK))
pagebuf_unlock(pb);
pagebuf_rele(pb);
......@@ -723,8 +715,8 @@ pagebuf_readahead(
if (bdi_write_congested(bdi))
return;
flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_READ_AHEAD);
pagebuf_get(target, ioff, isize, flags);
flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
xfs_buf_read_flags(target, ioff, isize, flags);
}
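
A sketch of the split read path after this change (the caller below is hypothetical; xfs_buf_read_flags(), pagebuf_readahead() and the PBF_* flags come from the hunks above). Note that pagebuf_readahead() no longer passes PBF_READ itself: xfs_buf_read_flags() adds it, issues I/O only for buffers that are not already valid, and releases an async buffer that turns out to be satisfied:

/* Hypothetical caller -- a sketch, not part of this patch. */
static xfs_buf_t *
example_read_block(
	xfs_buftarg_t	*target,
	loff_t		ioff,
	size_t		isize)
{
	/*
	 * Speculative read of the following range; any buffer created
	 * here is dropped internally, so there is nothing to release.
	 */
	pagebuf_readahead(target, ioff + isize, isize, 0);

	/*
	 * Synchronous read of the range we actually want; NULL means
	 * the buffer could not be set up.  The caller releases the
	 * buffer with pagebuf_rele() when finished.
	 */
	return xfs_buf_read_flags(target, ioff, isize,
				  PBF_LOCK | PBF_MAPPED);
}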
xfs_buf_t *
......@@ -1084,7 +1076,7 @@ _pagebuf_wait_unpin(
* done with respect to that I/O. The pb_iodone routine, if
* present, will be called as a side-effect.
*/
void
STATIC void
pagebuf_iodone_work(
void *v)
{
......@@ -1263,7 +1255,7 @@ bio_end_io_pagebuf(
return 0;
}
void
STATIC void
_pagebuf_ioapply(
xfs_buf_t *pb)
{
......@@ -1473,6 +1465,34 @@ pagebuf_iomove(
* Handling of buftargs.
*/
/*
* Wait for any bufs with callbacks that have been submitted but
* have not yet returned... walk the hash list for the target.
*/
void
xfs_wait_buftarg(
xfs_buftarg_t *target)
{
xfs_buf_t *pb, *n;
pb_hash_t *h;
int i;
for (i = 0; i < NHASH; i++) {
h = &pbhash[i];
again:
spin_lock(&h->pb_hash_lock);
list_for_each_entry_safe(pb, n, &h->pb_hash, pb_hash_list) {
if (pb->pb_target == target &&
!(pb->pb_flags & PBF_FS_MANAGED)) {
spin_unlock(&h->pb_hash_lock);
delay(100);
goto again;
}
}
spin_unlock(&h->pb_hash_lock);
}
}
void
xfs_free_buftarg(
xfs_buftarg_t *btp,
......
......@@ -168,20 +168,36 @@ typedef struct xfs_buf {
/* Finding and Reading Buffers */
extern xfs_buf_t *pagebuf_find( /* find buffer for block if */
extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */
/* the block is in memory */
xfs_buftarg_t *, /* inode for block */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK */
page_buf_flags_t, /* PBF_LOCK */
xfs_buf_t *); /* newly allocated buffer */
extern xfs_buf_t *pagebuf_get( /* allocate a buffer */
#define xfs_incore(buftarg,blkno,len,lockit) \
_pagebuf_find(buftarg, blkno, len, lockit, NULL)
extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */
xfs_buftarg_t *, /* inode for buffer */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
/* PBF_ASYNC */
#define xfs_buf_get(target, blkno, len, flags) \
xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
xfs_buftarg_t *, /* inode for buffer */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */
#define xfs_buf_read(target, blkno, len, flags) \
xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
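
One detail worth spelling out: like the old pagebuf_get()-based macros they replace (removed further down), the xfs_buf_get()/xfs_buf_read() wrappers discard their flags argument and hard-code PBF_LOCK | PBF_MAPPED. An illustrative expansion, not part of this patch:

/* bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0); */
/* becomes: */
/* bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len, */
/*                         PBF_LOCK | PBF_MAPPED);       */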
extern xfs_buf_t *pagebuf_lookup(
xfs_buftarg_t *,
loff_t, /* starting offset of range */
......@@ -472,18 +488,6 @@ extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
#define xfs_buf_read(target, blkno, len, flags) \
pagebuf_get((target), (blkno), (len), \
PBF_LOCK | PBF_READ | PBF_MAPPED)
#define xfs_buf_get(target, blkno, len, flags) \
pagebuf_get((target), (blkno), (len), \
PBF_LOCK | PBF_MAPPED)
#define xfs_buf_read_flags(target, blkno, len, flags) \
pagebuf_get((target), (blkno), (len), PBF_READ | (flags))
#define xfs_buf_get_flags(target, blkno, len, flags) \
pagebuf_get((target), (blkno), (len), (flags))
static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
{
bp->pb_fspriv3 = mp;
......@@ -508,10 +512,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
#define xfs_biodone(pb) \
pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0)
#define xfs_incore(buftarg,blkno,len,lockit) \
pagebuf_find(buftarg, blkno ,len, lockit)
#define xfs_biomove(pb, off, len, data, rw) \
pagebuf_iomove((pb), (off), (len), (data), \
((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
......@@ -566,6 +566,7 @@ static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *);
extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern void xfs_incore_relse(xfs_buftarg_t *, int, int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
......
......@@ -398,7 +398,7 @@ linvfs_file_mmap(
vattr_t va = { .va_mask = XFS_AT_UPDATIME };
int error;
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
if (vp->v_vfsp->vfs_flag & VFS_DMI) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = -XFS_SEND_MMAP(mp, vma, 0);
......@@ -473,7 +473,7 @@ linvfs_mprotect(
vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode);
int error = 0;
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
if (vp->v_vfsp->vfs_flag & VFS_DMI) {
if ((vma->vm_flags & VM_MAYSHARE) &&
(newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
......
......@@ -64,6 +64,7 @@ xfs_param_t xfs_params = {
.xfs_buf_timer = { 100/2, 1*100, 30*100 },
.xfs_buf_age = { 1*100, 15*100, 7200*100},
.inherit_nosym = { 0, 0, 1 },
.rotorstep = { 1, 1, 255 },
};
/*
......
......@@ -270,7 +270,7 @@ xfs_vget_fsop_handlereq(
/*
* Get the XFS inode, building a vnode to go with it.
*/
error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
if (error)
return error;
if (ip == NULL)
......
......@@ -142,6 +142,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define xfs_buf_timer_centisecs xfs_params.xfs_buf_timer.val
#define xfs_buf_age_centisecs xfs_params.xfs_buf_age.val
#define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val
#define xfs_rotorstep xfs_params.rotorstep.val
#define current_cpu() smp_processor_id()
#define current_pid() (current->pid)
......
......@@ -234,6 +234,10 @@ xfs_initialize_vnode(
vp->v_type = IFTOVT(ip->i_d.di_mode);
xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
xfs_set_inodeops(inode);
ip->i_flags &= ~XFS_INEW;
barrier();
unlock_new_inode(inode);
}
}
......@@ -284,7 +288,7 @@ linvfs_destroy_inode(
kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode));
}
int
STATIC int
xfs_inode_shake(
int priority,
unsigned int gfp_mask)
......
......@@ -134,6 +134,11 @@ STATIC ctl_table xfs_table[] = {
&sysctl_intvec, NULL,
&xfs_params.inherit_nosym.min, &xfs_params.inherit_nosym.max},
{XFS_ROTORSTEP, "rotorstep", &xfs_params.rotorstep.val,
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
&sysctl_intvec, NULL,
&xfs_params.rotorstep.min, &xfs_params.rotorstep.max},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
......
......@@ -60,6 +60,7 @@ typedef struct xfs_param {
xfs_sysctl_val_t xfs_buf_timer; /* Interval between xfsbufd wakeups. */
xfs_sysctl_val_t xfs_buf_age; /* Metadata buffer age before flush. */
xfs_sysctl_val_t inherit_nosym; /* Inherit the "nosymlinks" flag. */
xfs_sysctl_val_t rotorstep; /* inode32 AG rotoring control knob */
} xfs_param_t;
/*
......@@ -97,6 +98,7 @@ enum {
XFS_BUF_AGE = 17,
/* XFS_IO_BYPASS = 18 */
XFS_INHERIT_NOSYM = 19,
XFS_ROTORSTEP = 20,
};
extern xfs_param_t xfs_params;
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -85,7 +85,6 @@ kmem_shaker_t xfs_qm_shaker;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
STATIC int xfs_qm_quotacheck(xfs_mount_t *);
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_shake(int, unsigned int);
......@@ -349,7 +348,8 @@ xfs_qm_unmount_quotadestroy(
*/
int
xfs_qm_mount_quotas(
xfs_mount_t *mp)
xfs_mount_t *mp,
int mfsi_flags)
{
unsigned long s;
int error = 0;
......@@ -398,22 +398,16 @@ xfs_qm_mount_quotas(
/*
* If any of the quotas are not consistent, do a quotacheck.
*/
if (XFS_QM_NEED_QUOTACHECK(mp)) {
if (XFS_QM_NEED_QUOTACHECK(mp) &&
!(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
#ifdef DEBUG
cmn_err(CE_NOTE, "Doing a quotacheck. Please wait.");
#endif
if ((error = xfs_qm_quotacheck(mp))) {
cmn_err(CE_WARN, "Quotacheck unsuccessful (Error %d): "
"Disabling quotas.",
error);
/*
* We must turn off quotas.
/* Quotacheck has failed and quotas have
* been disabled.
*/
ASSERT(mp->m_quotainfo != NULL);
ASSERT(xfs_Gqm != NULL);
xfs_qm_destroy_quotainfo(mp);
mp->m_qflags = 0;
goto write_changes;
return XFS_ERROR(error);
}
#ifdef DEBUG
cmn_err(CE_NOTE, "Done quotacheck.");
......@@ -1788,7 +1782,7 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
if ((error = xfs_iget(mp, NULL, ino, XFS_ILOCK_EXCL, &ip, bno))) {
if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
*res = BULKSTAT_RV_NOTHING;
return (error);
}
......@@ -1875,9 +1869,9 @@ xfs_qm_dqusage_adjust(
/*
* Walk thru all the filesystem inodes and construct a consistent view
* of the disk quota world.
* of the disk quota world. If the quotacheck fails, disable quotas.
*/
STATIC int
int
xfs_qm_quotacheck(
xfs_mount_t *mp)
{
......@@ -1973,7 +1967,20 @@ xfs_qm_quotacheck(
XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
error_return:
cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
if (error) {
cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
"Disabling quotas.",
mp->m_fsname, error);
/*
* We must turn off quotas.
*/
ASSERT(mp->m_quotainfo != NULL);
ASSERT(xfs_Gqm != NULL);
xfs_qm_destroy_quotainfo(mp);
xfs_mount_reset_sbqflags(mp);
} else {
cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
}
return (error);
}
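
Since the failure handling now lives inside xfs_qm_quotacheck() itself, the mount paths (xfs_qm_newmount() and xfs_qm_endmount(), further down) simply call xfs_qm_mount_quotas() and no longer reset the superblock flags themselves. A condensed view of the resulting flow, pieced together from the hunks in this commit:

/*
 * Mount-time quota flow after this patch -- illustrative summary:
 *
 *   xfs_qm_newmount() / xfs_qm_endmount()
 *     -> xfs_qm_mount_quotas(mp, mfsi_flags)
 *          -> xfs_qm_quotacheck(mp)   (skipped under
 *                                      XFS_MFSI_NO_QUOTACHECK)
 *               on error: xfs_qm_destroy_quotainfo(mp);
 *                         xfs_mount_reset_sbqflags(mp);
 *
 * By the time xfs_qm_quotacheck() returns an error, quotas are
 * already disabled, so callers only propagate the error code.
 */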
......@@ -2003,14 +2010,14 @@ xfs_qm_init_quotainos(
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, &uip, 0)))
0, 0, &uip, 0)))
return XFS_ERROR(error);
}
if (XFS_IS_GQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
0, &gip, 0))) {
0, 0, &gip, 0))) {
if (uip)
VN_RELE(XFS_ITOV(uip));
return XFS_ERROR(error);
......
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -182,10 +182,13 @@ typedef struct xfs_dquot_acct {
#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
extern void xfs_mount_reset_sbqflags(xfs_mount_t *);
extern int xfs_qm_init_quotainfo(xfs_mount_t *);
extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
extern int xfs_qm_mount_quotas(xfs_mount_t *);
extern int xfs_qm_mount_quotas(xfs_mount_t *, int);
extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint);
extern int xfs_qm_quotacheck(xfs_mount_t *);
extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
extern int xfs_qm_unmount_quotas(xfs_mount_t *);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
......
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -207,10 +207,9 @@ xfs_qm_syncall(
}
/*
* When xfsquotas isn't installed and the superblock had quotas, we need to
* clear the quotaflags from superblock.
* Clear the quotaflags in memory and in the superblock.
*/
STATIC void
void
xfs_mount_reset_sbqflags(
xfs_mount_t *mp)
{
......@@ -241,6 +240,8 @@ xfs_mount_reset_sbqflags(
if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT)) {
xfs_trans_cancel(tp, 0);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_mount_reset_sbqflags: Superblock update failed!");
return;
}
xfs_mod_sb(tp, XFS_SB_QFLAGS);
......@@ -294,15 +295,12 @@ xfs_qm_newmount(
*/
if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
/*
* If the xfs quota code isn't installed,
* we have to reset the quotachk'd bit.
* If an error occurred, qm_mount_quotas code
* has already disabled quotas. So, just finish
* mounting, and get on with the boring life
* without disk quotas.
*/
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
xfs_qm_mount_quotas(mp, 0);
} else {
/*
* Clear the quota flags, but remember them. This
......@@ -324,13 +322,13 @@ STATIC int
xfs_qm_endmount(
xfs_mount_t *mp,
uint needquotamount,
uint quotaflags)
uint quotaflags,
int mfsi_flags)
{
if (needquotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
xfs_qm_mount_quotas(mp, mfsi_flags);
}
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
......
......@@ -404,7 +404,7 @@ xfs_qm_scall_trunc_qfiles(
}
if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, &qip, 0);
error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
if (! error) {
(void) xfs_truncate_file(mp, qip);
VN_RELE(XFS_ITOV(qip));
......@@ -412,7 +412,7 @@ xfs_qm_scall_trunc_qfiles(
}
if ((flags & XFS_DQ_GROUP) && mp->m_sb.sb_gquotino != NULLFSINO) {
error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, &qip, 0);
error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
if (! error) {
(void) xfs_truncate_file(mp, qip);
VN_RELE(XFS_ITOV(qip));
......@@ -555,11 +555,13 @@ xfs_qm_scall_getqstat(
gip = mp->m_quotainfo->qi_gquotaip;
}
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, &uip, 0) == 0)
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
0, 0, &uip, 0) == 0)
tempuqip = B_TRUE;
}
if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, &gip, 0) == 0)
if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
0, 0, &gip, 0) == 0)
tempgqip = B_TRUE;
}
if (uip) {
......@@ -1338,7 +1340,7 @@ xfs_qm_internalqcheck_adjust(
ipreleased = B_FALSE;
again:
lock_flags = XFS_ILOCK_SHARED;
if ((error = xfs_iget(mp, NULL, ino, lock_flags, &ip, bno))) {
if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
*res = BULKSTAT_RV_NOTHING;
return (error);
}
......
......@@ -2247,6 +2247,7 @@ xfs_alloc_vextent(
xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
int no_min = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
type = args->otype = args->type;
......@@ -2310,7 +2311,9 @@ xfs_alloc_vextent(
*/
if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
(mp->m_flags & XFS_MOUNT_32BITINODES)) {
args->fsbno = XFS_AGB_TO_FSB(mp, mp->m_agfrotor, 0);
args->fsbno = XFS_AGB_TO_FSB(mp,
((mp->m_agfrotor / rotorstep) %
mp->m_sb.sb_agcount), 0);
bump_rotor = 1;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
......@@ -2326,7 +2329,8 @@ xfs_alloc_vextent(
/*
* Start with the last place we left off.
*/
args->agno = sagno = mp->m_agfrotor;
args->agno = sagno = (mp->m_agfrotor / rotorstep) %
mp->m_sb.sb_agcount;
args->type = XFS_ALLOCTYPE_THIS_AG;
flags = XFS_ALLOC_FLAG_TRYLOCK;
} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
......@@ -2400,8 +2404,14 @@ xfs_alloc_vextent(
}
}
up_read(&mp->m_peraglock);
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG))
mp->m_agfrotor = (args->agno + 1) % mp->m_sb.sb_agcount;
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
if (args->agno == sagno)
mp->m_agfrotor = (mp->m_agfrotor + 1) %
(mp->m_sb.sb_agcount * rotorstep);
else
mp->m_agfrotor = (args->agno * rotorstep + 1) %
(mp->m_sb.sb_agcount * rotorstep);
}
break;
default:
ASSERT(0);
......
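
The rotor now counts in units of rotorstep: each AG receives rotorstep consecutive initial-data allocations before the cursor advances, slowing how quickly inode32 filesystems spread new files across AGs. A standalone model of the arithmetic (the agcount and rotorstep values are made up for illustration):

#include <stdio.h>

/* Model of the new inode32 AG rotor arithmetic -- illustrative only. */
int main(void)
{
	unsigned int agcount = 4;	/* hypothetical sb_agcount */
	unsigned int rotorstep = 3;	/* hypothetical fs.xfs.rotorstep */
	unsigned int agfrotor = 0;
	int i;

	for (i = 0; i < 12; i++) {
		/* AG chosen for the next initial-data allocation */
		unsigned int agno = (agfrotor / rotorstep) % agcount;

		printf("alloc %2d -> AG %u\n", i, agno);
		/* bump as xfs_alloc_vextent() does when the allocation
		 * stayed in the target AG; wraps at agcount * rotorstep */
		agfrotor = (agfrotor + 1) % (agcount * rotorstep);
	}
	return 0;	/* prints AG 0,0,0,1,1,1,2,2,2,3,3,3 */
}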
......@@ -169,6 +169,7 @@ xfs_iget_core(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
uint flags,
uint lock_flags,
xfs_inode_t **ipp,
xfs_daddr_t bno)
......@@ -180,7 +181,6 @@ xfs_iget_core(
ulong version;
int error;
/* REFERENCED */
int newnode;
xfs_chash_t *ch;
xfs_chashlist_t *chl, *chlnew;
SPLDECL(s);
......@@ -193,11 +193,22 @@ xfs_iget_core(
for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
if (ip->i_ino == ino) {
/*
* If INEW is set this inode is being set up;
* we need to pause and try again.
*/
if (ip->i_flags & XFS_INEW) {
read_unlock(&ih->ih_lock);
delay(1);
XFS_STATS_INC(xs_ig_frecycle);
goto again;
}
inode_vp = XFS_ITOV_NULL(ip);
if (inode_vp == NULL) {
/* If IRECLAIM is set this inode is
/*
* If IRECLAIM is set this inode is
* on its way out of the system,
* we need to pause and try again.
*/
......@@ -250,14 +261,15 @@ xfs_iget_core(
XFS_STATS_INC(xs_ig_found);
finish_inode:
if (lock_flags != 0) {
xfs_ilock(ip, lock_flags);
}
newnode = (ip->i_d.di_mode == 0);
if (newnode) {
if (ip->i_d.di_mode == 0) {
if (!(flags & IGET_CREATE))
return ENOENT;
xfs_iocore_inode_reinit(ip);
}
if (lock_flags != 0)
xfs_ilock(ip, lock_flags);
ip->i_flags &= ~XFS_ISTALE;
vn_trace_exit(vp, "xfs_iget.found",
......@@ -293,6 +305,11 @@ xfs_iget_core(
if (lock_flags != 0) {
xfs_ilock(ip, lock_flags);
}
if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {
xfs_idestroy(ip);
return ENOENT;
}
/*
* Put ip on its hash chain, unless someone else hashed a duplicate
......@@ -324,6 +341,7 @@ xfs_iget_core(
ih->ih_next = ip;
ip->i_udquot = ip->i_gdquot = NULL;
ih->ih_version++;
ip->i_flags |= XFS_INEW;
write_unlock(&ih->ih_lock);
......@@ -404,8 +422,6 @@ xfs_iget_core(
XFS_MOUNT_IUNLOCK(mp);
newnode = 1;
return_ip:
ASSERT(ip->i_df.if_ext_max ==
XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
......@@ -434,6 +450,7 @@ xfs_iget(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
uint flags,
uint lock_flags,
xfs_inode_t **ipp,
xfs_daddr_t bno)
......@@ -454,8 +471,8 @@ xfs_iget(
if (inode->i_state & I_NEW) {
inode_allocate:
vn_initialize(inode);
error = xfs_iget_core(vp, mp, tp, ino,
lock_flags, ipp, bno);
error = xfs_iget_core(vp, mp, tp, ino, flags,
lock_flags, ipp, bno);
if (error) {
vn_mark_bad(vp);
if (inode->i_state & I_NEW)
......@@ -576,6 +593,10 @@ xfs_iput_new(xfs_inode_t *ip,
vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
if ((ip->i_d.di_mode == 0)) {
ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));
vn_mark_bad(vp);
}
if (inode->i_state & I_NEW)
unlock_new_inode(inode);
if (lock_flags)
......
......@@ -1163,7 +1163,8 @@ xfs_ialloc(
* This is because we're setting fields here we need
* to prevent others from looking at until we're done.
*/
error = xfs_trans_iget(tp->t_mountp, tp, ino, XFS_ILOCK_EXCL, &ip);
error = xfs_trans_iget(tp->t_mountp, tp, ino,
IGET_CREATE, XFS_ILOCK_EXCL, &ip);
if (error != 0) {
return error;
}
......
......@@ -381,6 +381,7 @@ void xfs_ifork_next_set(xfs_inode_t *ip, int w, int n);
#define XFS_IRECLAIM 0x0008 /* we have started reclaiming this inode */
#define XFS_ISTALE 0x0010 /* inode has been staled */
#define XFS_IRECLAIMABLE 0x0020 /* inode can be reclaimed */
#define XFS_INEW 0x0040
/*
* Flags for inode locking.
......@@ -465,6 +466,9 @@ xfs_inode_t *xfs_bhvtoi(struct bhv_desc *bhvp);
/*
* xfs_iget.c prototypes.
*/
#define IGET_CREATE 1
void xfs_ihash_init(struct xfs_mount *);
void xfs_ihash_free(struct xfs_mount *);
void xfs_chash_init(struct xfs_mount *);
......@@ -473,7 +477,7 @@ xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t,
struct xfs_trans *);
void xfs_inode_lock_init(xfs_inode_t *, struct vnode *);
int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
uint, xfs_inode_t **, xfs_daddr_t);
uint, uint, xfs_inode_t **, xfs_daddr_t);
void xfs_iput(xfs_inode_t *, uint);
void xfs_iput_new(xfs_inode_t *, uint);
void xfs_ilock(xfs_inode_t *, uint);
......
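
A sketch of how the new flags argument is meant to be used (the wrapper functions below are hypothetical; xfs_iget(), xfs_trans_iget(), IGET_CREATE and XFS_INEW come from the hunks in this commit). Plain lookups pass 0 and now get ENOENT back for a freed inode (di_mode == 0); only the inode allocator passes IGET_CREATE, and XFS_INEW parks concurrent lookups in the delay(1) retry loop until xfs_initialize_vnode() clears the flag:

/* Hypothetical callers -- a sketch, not part of this patch. */
static int
example_lookup(xfs_mount_t *mp, xfs_ino_t ino, xfs_inode_t **ipp)
{
	/*
	 * flags == 0: a free inode (di_mode == 0) yields ENOENT
	 * instead of handing back a half-dead inode.
	 */
	return xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, ipp, 0);
}

static int
example_allocate(xfs_trans_t *tp, xfs_ino_t ino, xfs_inode_t **ipp)
{
	/*
	 * IGET_CREATE: we are the allocator, so di_mode == 0 is
	 * expected here and must not be treated as ENOENT.
	 */
	return xfs_trans_iget(tp->t_mountp, tp, ino,
			      IGET_CREATE, XFS_ILOCK_EXCL, ipp);
}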
......@@ -102,7 +102,7 @@ xfs_bulkstat_one(
/* We're not being passed a pointer to a dinode. This happens
* if BULKSTAT_FG_IGET is selected. Do the iget.
*/
error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, bno);
error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
if (error) {
*stat = BULKSTAT_RV_NOTHING;
return error;
......
......@@ -902,20 +902,15 @@ xlog_space_left(xlog_t *log, int cycle, int bytes)
} else {
/*
* The reservation head is behind the tail.
* This can only happen when the AIL is empty so the tail
* is equal to the head and the l_roundoff value in the
* log structure is taking up the difference between the
* reservation head and the tail. The bytes accounted for
* by the l_roundoff field are temporarily 'lost' to the
* reservation mechanism, but they are cleaned up when the
* log buffers that created them are reused. These lost
* bytes are what allow the reservation head to fall behind
* the tail in the case that the log is 'empty'.
* In this case we just want to return the size of the
* log as the amount of space left.
*/
ASSERT((tail_cycle == (cycle + 1)) ||
((bytes + log->l_roundoff) >= tail_bytes));
xfs_fs_cmn_err(CE_ALERT, log->l_mp,
"xlog_space_left: head behind tail\n"
" tail_cycle = %d, tail_bytes = %d\n"
" GH cycle = %d, GH bytes = %d",
tail_cycle, tail_bytes, cycle, bytes);
ASSERT(0);
free_bytes = log->l_logsize;
}
return free_bytes;
......@@ -1355,8 +1350,8 @@ xlog_grant_push_ail(xfs_mount_t *mp,
/*
* Flush out the in-core log (iclog) to the on-disk log in a synchronous or
* asynchronous fashion. Previously, we should have moved the current iclog
* Flush out the in-core log (iclog) to the on-disk log in an asynchronous
* fashion. Previously, we should have moved the current iclog
* ptr in the log to point to the next available iclog. This allows further
* write to continue while this code syncs out an iclog ready to go.
* Before an in-core log can be written out, the data section must be scanned
......@@ -1388,8 +1383,11 @@ xlog_sync(xlog_t *log,
int i, ops;
uint count; /* byte count of bwrite */
uint count_init; /* initial count before roundup */
int roundoff; /* roundoff to BB or stripe */
int split = 0; /* split write into two regions */
int error;
SPLDECL(s);
int v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb);
XFS_STATS_INC(xs_log_writes);
ASSERT(iclog->ic_refcnt == 0);
......@@ -1398,23 +1396,34 @@ xlog_sync(xlog_t *log,
count_init = log->l_iclog_hsize + iclog->ic_offset;
/* Round out the log write size */
if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
log->l_mp->m_sb.sb_logsunit > 1) {
if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
/* we have a v2 stripe unit to use */
count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
} else {
count = BBTOB(BTOBB(count_init));
}
iclog->ic_roundoff = count - count_init;
log->l_roundoff += iclog->ic_roundoff;
roundoff = count - count_init;
ASSERT(roundoff >= 0);
ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
roundoff < log->l_mp->m_sb.sb_logsunit)
||
(log->l_mp->m_sb.sb_logsunit <= 1 &&
roundoff < BBTOB(1)));
xlog_pack_data(log, iclog); /* put cycle number in every block */
/* move grant heads by roundoff in sync */
s = GRANT_LOCK(log);
XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
GRANT_UNLOCK(log, s);
/* put cycle number in every block */
xlog_pack_data(log, iclog, roundoff);
/* real byte length */
if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
if (v2) {
INT_SET(iclog->ic_header.h_len,
ARCH_CONVERT,
iclog->ic_offset + iclog->ic_roundoff);
iclog->ic_offset + roundoff);
} else {
INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
}
......@@ -2278,11 +2287,6 @@ xlog_state_get_iclog_space(xlog_t *log,
INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
ASSIGN_LSN(head->h_lsn, log, ARCH_CONVERT);
ASSERT(log->l_curr_block >= 0);
/* round off error from last write with this iclog */
ticket->t_curr_res -= iclog->ic_roundoff;
log->l_roundoff -= iclog->ic_roundoff;
iclog->ic_roundoff = 0;
}
/* If there is enough room to write everything, then do it. Otherwise,
......@@ -2853,7 +2857,6 @@ xlog_state_sync_all(xlog_t *log, uint flags)
* has already taken care of the roundoff from
* the previous sync.
*/
ASSERT(iclog->ic_roundoff == 0);
iclog->ic_refcnt++;
lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
xlog_state_switch_iclogs(log, iclog, 0);
......
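
The round-off bookkeeping moves out of ic_roundoff/l_roundoff (deleted in the next hunks) and is instead added to both grant heads under GRANT_LOCK at sync time. A standalone model of the v1 (non-stripe-unit) rounding with made-up numbers:

#include <stdio.h>

#define BBSIZE	512	/* basic block size, as in XFS */

/* Model of the v1 round-up in xlog_sync() -- illustrative only. */
int main(void)
{
	unsigned int count_init = 3100;	/* hypothetical header + iclog data */
	/* BBTOB(BTOBB(count_init)): round up to a whole basic block */
	unsigned int count = ((count_init + BBSIZE - 1) / BBSIZE) * BBSIZE;
	unsigned int roundoff = count - count_init;

	/*
	 * xlog_sync() now adds `roundoff` to both the write and reserve
	 * grant heads under GRANT_LOCK, keeping reservations consistent
	 * without the old per-iclog ic_roundoff bookkeeping.
	 */
	printf("count=%u roundoff=%u\n", count, roundoff);	/* 3584, 484 */
	return 0;
}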
......@@ -430,7 +430,6 @@ typedef struct xlog_iclog_fields {
int ic_size;
int ic_offset;
int ic_refcnt;
int ic_roundoff;
int ic_bwritecnt;
ushort_t ic_state;
char *ic_datap; /* pointer to iclog data */
......@@ -462,7 +461,6 @@ typedef struct xlog_in_core {
#define ic_size hic_fields.ic_size
#define ic_offset hic_fields.ic_offset
#define ic_refcnt hic_fields.ic_refcnt
#define ic_roundoff hic_fields.ic_roundoff
#define ic_bwritecnt hic_fields.ic_bwritecnt
#define ic_state hic_fields.ic_state
#define ic_datap hic_fields.ic_datap
......@@ -498,7 +496,6 @@ typedef struct log {
xfs_daddr_t l_logBBstart; /* start block of log */
int l_logsize; /* size of log in bytes */
int l_logBBsize; /* size of log in BB chunks */
int l_roundoff; /* round off error of iclogs */
int l_curr_cycle; /* Cycle number of log writes */
int l_prev_cycle; /* Cycle number before last
* block increment */
......@@ -545,7 +542,7 @@ extern int xlog_find_tail(xlog_t *log,
int readonly);
extern int xlog_recover(xlog_t *log, int readonly);
extern int xlog_recover_finish(xlog_t *log, int mfsi_flags);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
extern void xlog_recover_process_iunlinks(xlog_t *log);
extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
......
......@@ -3262,7 +3262,7 @@ xlog_recover_process_iunlinks(
xfs_buf_relse(agibp);
ino = XFS_AGINO_TO_INO(mp, agno, agino);
error = xfs_iget(mp, NULL, ino, 0, &ip, 0);
error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
ASSERT(error || (ip != NULL));
if (!error) {
......@@ -3384,10 +3384,11 @@ xlog_pack_data_checksum(
void
xlog_pack_data(
xlog_t *log,
xlog_in_core_t *iclog)
xlog_in_core_t *iclog,
int roundoff)
{
int i, j, k;
int size = iclog->ic_offset + iclog->ic_roundoff;
int size = iclog->ic_offset + roundoff;
uint cycle_lsn;
xfs_caddr_t dp;
xlog_in_core_2_t *xhdr;
......
......@@ -975,7 +975,7 @@ xfs_mountfs(
* Get and sanity-check the root inode.
* Save the pointer to it in the mount structure.
*/
error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_ILOCK_EXCL, &rip, 0);
error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
if (error) {
cmn_err(CE_WARN, "XFS: failed to read root inode");
goto error3;
......@@ -1036,7 +1036,7 @@ xfs_mountfs(
/*
* Complete the quota initialisation, post-log-replay component.
*/
if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags)))
if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
goto error4;
return 0;
......@@ -1098,6 +1098,8 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
xfs_unmountfs_writesb(mp);
xfs_unmountfs_wait(mp); /* wait for async bufs */
xfs_log_unmount(mp); /* Done! No more fs ops. */
xfs_freesb(mp);
......@@ -1142,6 +1144,16 @@ xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
xfs_free_buftarg(mp->m_ddev_targp, 0);
}
void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
if (mp->m_logdev_targp != mp->m_ddev_targp)
xfs_wait_buftarg(mp->m_logdev_targp);
if (mp->m_rtdev_targp)
xfs_wait_buftarg(mp->m_rtdev_targp);
xfs_wait_buftarg(mp->m_ddev_targp);
}
int
xfs_unmountfs_writesb(xfs_mount_t *mp)
{
......
......@@ -133,7 +133,7 @@ struct xfs_dqtrxops;
struct xfs_quotainfo;
typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int);
typedef int (*xfs_qmunmount_t)(struct xfs_mount *);
typedef void (*xfs_qmdone_t)(struct xfs_mount *);
typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
......@@ -171,8 +171,8 @@ typedef struct xfs_qmops {
#define XFS_QM_INIT(mp, mnt, fl) \
(*(mp)->m_qm_ops.xfs_qminit)(mp, mnt, fl)
#define XFS_QM_MOUNT(mp, mnt, fl) \
(*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl)
#define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \
(*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl, mfsi_flags)
#define XFS_QM_UNMOUNT(mp) \
(*(mp)->m_qm_ops.xfs_qmunmount)(mp)
#define XFS_QM_DONE(mp) \
......@@ -466,6 +466,7 @@ typedef struct xfs_mount {
#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */
#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */
/* log recovery */
#define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */
/*
* Macros for getting from mount to vfs and back.
......@@ -540,6 +541,7 @@ extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv);
extern int xfs_mountfs(struct vfs *, xfs_mount_t *mp, int);
extern int xfs_unmountfs(xfs_mount_t *, struct cred *);
extern void xfs_unmountfs_wait(xfs_mount_t *);
extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *);
extern int xfs_unmountfs_writesb(xfs_mount_t *);
extern int xfs_unmount_flush(xfs_mount_t *, int);
......
......@@ -149,7 +149,7 @@ xfs_growfs_rt_alloc(
/*
* Lock the inode.
*/
if ((error = xfs_trans_iget(mp, tp, ino, XFS_ILOCK_EXCL, &ip)))
if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip)))
goto error_exit;
XFS_BMAP_INIT(&flist, &firstblock);
/*
......@@ -189,7 +189,7 @@ xfs_growfs_rt_alloc(
/*
* Lock the bitmap inode.
*/
if ((error = xfs_trans_iget(mp, tp, ino, XFS_ILOCK_EXCL,
if ((error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL,
&ip)))
goto error_exit;
/*
......@@ -2042,7 +2042,7 @@ xfs_growfs_rt(
/*
* Lock out other callers by grabbing the bitmap inode lock.
*/
if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino,
if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
XFS_ILOCK_EXCL, &ip)))
goto error_exit;
ASSERT(ip == mp->m_rbmip);
......@@ -2057,7 +2057,7 @@ xfs_growfs_rt(
* Get the summary inode into the transaction.
*/
if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino,
XFS_ILOCK_EXCL, &ip)))
0, XFS_ILOCK_EXCL, &ip)))
goto error_exit;
ASSERT(ip == mp->m_rsumip);
/*
......@@ -2177,7 +2177,7 @@ xfs_rtallocate_extent(
/*
* Lock out other callers by grabbing the bitmap inode lock.
*/
error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip);
error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
if (error) {
return error;
}
......@@ -2240,7 +2240,7 @@ xfs_rtfree_extent(
/*
* Synchronize by locking the bitmap inode.
*/
error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip);
error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
if (error) {
return error;
}
......@@ -2348,12 +2348,12 @@ xfs_rtmount_inodes(
sbp = &mp->m_sb;
if (sbp->sb_rbmino == NULLFSINO)
return 0;
error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, &mp->m_rbmip, 0);
error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0);
if (error)
return error;
ASSERT(mp->m_rbmip != NULL);
ASSERT(sbp->sb_rsumino != NULLFSINO);
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, &mp->m_rsumip, 0);
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0);
if (error) {
VN_RELE(XFS_ITOV(mp->m_rbmip));
return error;
......@@ -2384,7 +2384,7 @@ xfs_rtpick_extent(
__uint64_t seq; /* sequence number of file creation */
__uint64_t *seqp; /* pointer to seqno in inode */
error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip);
error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip);
if (error)
return error;
ASSERT(ip == mp->m_rbmip);
......
......@@ -1007,7 +1007,7 @@ void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
xfs_ino_t , uint, struct xfs_inode **);
xfs_ino_t , uint, uint, struct xfs_inode **);
void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint);
void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *);
void xfs_trans_ihold_release(xfs_trans_t *, struct xfs_inode *);
......
......@@ -95,6 +95,7 @@ xfs_trans_iget(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
uint flags,
uint lock_flags,
xfs_inode_t **ipp)
{
......@@ -106,9 +107,8 @@ xfs_trans_iget(
* If the transaction pointer is NULL, just call the normal
* xfs_iget().
*/
if (tp == NULL) {
return (xfs_iget(mp, NULL, ino, lock_flags, ipp, 0));
}
if (tp == NULL)
return xfs_iget(mp, NULL, ino, flags, lock_flags, ipp, 0);
/*
* If we find the inode in core with this transaction
......@@ -148,7 +148,7 @@ xfs_trans_iget(
}
ASSERT(lock_flags & XFS_ILOCK_EXCL);
error = xfs_iget(tp->t_mountp, tp, ino, lock_flags, &ip, 0);
error = xfs_iget(tp->t_mountp, tp, ino, flags, lock_flags, &ip, 0);
if (error) {
return error;
}
......@@ -186,7 +186,6 @@ xfs_trans_iget(
return 0;
}
/*
* Add the locked inode to the transaction.
* The inode must be locked, and it cannot be associated with any
......
......@@ -110,7 +110,7 @@ xfs_dir_lookup_int(
* reservation in the inactive routine.
*/
xfs_iunlock(dp, lock_mode);
error = xfs_iget(dp->i_mount, NULL, *inum, 0, ipp, 0);
error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0);
xfs_ilock(dp, lock_mode);
if (error) {
......
......@@ -1610,7 +1610,7 @@ xfs_vget(
if (ino == 0)
return XFS_ERROR(ESTALE);
error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
if (error) {
*vpp = NULL;
return error;
......
......@@ -242,9 +242,8 @@ void reparent_to_init(void)
memcpy(current->signal->rlim, init_task.signal->rlim,
sizeof(current->signal->rlim));
atomic_inc(&(INIT_USER->__count));
switch_uid(INIT_USER);
write_unlock_irq(&tasklist_lock);
switch_uid(INIT_USER);
}
void __set_special_pids(pid_t session, pid_t pgrp)
......