Commit d61a4de1 authored by Nathan Scott

Merge sgi.com:/source2/linux-2.6 into sgi.com:/source2/xfs-linux-2.6

parents 85283764 78ba94ec
@@ -108,7 +108,7 @@ linvfs_unwritten_done(
struct buffer_head *bh,
int uptodate)
{
page_buf_t *pb = (page_buf_t *)bh->b_private;
xfs_buf_t *pb = (xfs_buf_t *)bh->b_private;
ASSERT(buffer_unwritten(bh));
bh->b_end_io = NULL;
@@ -265,9 +265,9 @@ xfs_map_at_offset(
STATIC struct page *
xfs_probe_unwritten_page(
struct address_space *mapping,
unsigned long index,
pgoff_t index,
xfs_iomap_t *iomapp,
page_buf_t *pb,
xfs_buf_t *pb,
unsigned long max_offset,
unsigned long *fsbs,
unsigned int bbits)
@@ -316,7 +316,7 @@ xfs_probe_unwritten_page(
STATIC unsigned int
xfs_probe_unmapped_page(
struct address_space *mapping,
unsigned long index,
pgoff_t index,
unsigned int pg_offset)
{
struct page *page;
@@ -356,8 +356,8 @@ xfs_probe_unmapped_cluster(
struct buffer_head *bh,
struct buffer_head *head)
{
unsigned long tindex, tlast, tloff;
unsigned int len, total = 0;
pgoff_t tindex, tlast, tloff;
unsigned int pg_offset, len, total = 0;
struct address_space *mapping = inode->i_mapping;
/* First sum forwards in this page */
@@ -382,9 +382,9 @@ xfs_probe_unmapped_cluster(
total += len;
}
if (tindex == tlast &&
(tloff = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
(pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
total += xfs_probe_unmapped_page(mapping,
tindex, tloff);
tindex, pg_offset);
}
}
return total;
@@ -398,7 +398,7 @@ xfs_probe_unmapped_cluster(
STATIC struct page *
xfs_probe_delalloc_page(
struct inode *inode,
unsigned long index)
pgoff_t index)
{
struct page *page;
@@ -445,7 +445,7 @@ xfs_map_unwritten(
{
struct buffer_head *bh = curr;
xfs_iomap_t *tmp;
page_buf_t *pb;
xfs_buf_t *pb;
loff_t offset, size;
unsigned long nblocks = 0;
@@ -497,8 +497,9 @@ xfs_map_unwritten(
*/
if (bh == head) {
struct address_space *mapping = inode->i_mapping;
unsigned long tindex, tloff, tlast, bs;
unsigned int bbits = inode->i_blkbits;
pgoff_t tindex, tloff, tlast;
unsigned long bs;
unsigned int pg_offset, bbits = inode->i_blkbits;
struct page *page;
tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
@@ -522,10 +523,10 @@ xfs_map_unwritten(
}
if (tindex == tlast &&
(tloff = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
(pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
page = xfs_probe_unwritten_page(mapping,
tindex, iomapp, pb,
tloff, &bs, bbits);
pg_offset, &bs, bbits);
if (page) {
nblocks += bs;
atomic_add(bs, &pb->pb_io_remaining);
@@ -603,7 +604,8 @@ xfs_convert_page(
{
struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
xfs_iomap_t *mp = iomapp, *tmp;
unsigned long end, offset, end_index;
unsigned long end, offset;
pgoff_t end_index;
int i = 0, index = 0;
int bbits = inode->i_blkbits;
@@ -671,12 +673,12 @@ xfs_convert_page(
STATIC void
xfs_cluster_write(
struct inode *inode,
unsigned long tindex,
pgoff_t tindex,
xfs_iomap_t *iomapp,
int startio,
int all_bh)
{
unsigned long tlast;
pgoff_t tlast;
struct page *page;
tlast = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
@@ -716,7 +718,8 @@ xfs_page_state_convert(
{
struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
xfs_iomap_t *iomp, iomap;
unsigned long p_offset = 0, end_index;
unsigned long p_offset = 0;
pgoff_t end_index;
loff_t offset;
unsigned long long end_offset;
int len, err, i, cnt = 0, uptodate = 1;
@@ -50,19 +50,19 @@ unsigned long xfs_physmem;
*/
xfs_param_t xfs_params = {
/* MIN DFLT MAX */
.restrict_chown = { 0, 1, 1 },
.sgid_inherit = { 0, 0, 1 },
.symlink_mode = { 0, 0, 1 },
.panic_mask = { 0, 0, 127 },
.error_level = { 0, 3, 11 },
.sync_interval = { HZ, 30*HZ, 60*HZ },
.stats_clear = { 0, 0, 1 },
.inherit_sync = { 0, 1, 1 },
.inherit_nodump = { 0, 1, 1 },
.inherit_noatim = { 0, 1, 1 },
.flush_interval = { HZ/2, HZ, 30*HZ },
.age_buffer = { 1*HZ, 15*HZ, 300*HZ },
/* MIN DFLT MAX */
.restrict_chown = { 0, 1, 1 },
.sgid_inherit = { 0, 0, 1 },
.symlink_mode = { 0, 0, 1 },
.panic_mask = { 0, 0, 127 },
.error_level = { 0, 3, 11 },
.sync_interval = { USER_HZ, 30*USER_HZ, 7200*USER_HZ },
.stats_clear = { 0, 0, 1 },
.inherit_sync = { 0, 1, 1 },
.inherit_nodump = { 0, 1, 1 },
.inherit_noatim = { 0, 1, 1 },
.flush_interval = { USER_HZ/2, USER_HZ, 30*USER_HZ },
.age_buffer = { 1*USER_HZ, 15*USER_HZ, 7200*USER_HZ },
};
/*
@@ -659,7 +659,7 @@ xfs_ioctl(
case XFS_IOC_DIOINFO: {
struct dioattr da;
pb_target_t *target =
xfs_buftarg_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
@@ -134,13 +134,13 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define irix_symlink_mode xfs_params.symlink_mode.val
#define xfs_panic_mask xfs_params.panic_mask.val
#define xfs_error_level xfs_params.error_level.val
#define xfs_syncd_interval xfs_params.sync_interval.val
#define xfs_syncd_interval (xfs_params.sync_interval.val * HZ / USER_HZ)
#define xfs_stats_clear xfs_params.stats_clear.val
#define xfs_inherit_sync xfs_params.inherit_sync.val
#define xfs_inherit_nodump xfs_params.inherit_nodump.val
#define xfs_inherit_noatime xfs_params.inherit_noatim.val
#define xfs_flush_interval xfs_params.flush_interval.val
#define xfs_age_buffer xfs_params.age_buffer.val
#define xfs_flush_interval (xfs_params.flush_interval.val * HZ / USER_HZ)
#define xfs_age_buffer (xfs_params.age_buffer.val * HZ / USER_HZ)
#define current_cpu() smp_processor_id()
#define current_pid() (current->pid)
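
The two hunks above switch the tunables from kernel ticks (HZ) to USER_HZ, the tick rate exposed to userspace, and scale back to jiffies only where the kernel consumes the value. A minimal standalone sketch of that scaling, assuming illustrative rates of HZ=1000 and USER_HZ=100 rather than anything taken from the commit:

#include <stdio.h>

#define HZ      1000	/* assumed kernel tick rate for this sketch */
#define USER_HZ  100	/* assumed user-visible tick rate */

/* mirrors the shape of the new macros: stored in USER_HZ, used in jiffies */
static long user_ticks_to_jiffies(long user_ticks)
{
	return user_ticks * HZ / USER_HZ;
}

int main(void)
{
	long sync_interval = 30 * USER_HZ;	/* "30 seconds" as stored in xfs_params */

	printf("%ld USER_HZ ticks -> %ld jiffies\n",
	       sync_interval, user_ticks_to_jiffies(sync_interval));
	return 0;
}

Keeping the stored values in USER_HZ means the limits and defaults seen through sysctl stay the same regardless of the kernel's compile-time HZ.
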
@@ -247,10 +247,11 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define howmany(x, y) (((x)+((y)-1))/(y))
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
static inline void xfs_stack_trace(void)
{
dump_stack();
}
#define xfs_stack_trace() dump_stack()
#define xfs_itruncate_data(ip, off) \
(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
/* Move the kernel do_div definition off to one side */
@@ -301,7 +301,7 @@ xfs_read(
/* END copy & waste from filemap.c */
if (ioflags & IO_ISDIRECT) {
pb_target_t *target =
xfs_buftarg_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
if ((*offset & target->pbr_smask) ||
@@ -687,7 +687,7 @@ xfs_write(
}
if (ioflags & IO_ISDIRECT) {
pb_target_t *target =
xfs_buftarg_t *target =
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
@@ -38,7 +38,7 @@ struct xfs_mount;
struct xfs_iocore;
struct xfs_inode;
struct xfs_bmbt_irec;
struct page_buf_s;
struct xfs_buf;
struct xfs_iomap;
#if defined(XFS_RW_TRACE)
@@ -89,8 +89,8 @@ extern void xfs_inval_cached_trace(struct xfs_iocore *,
extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int,
struct xfs_iomap *, int *);
extern int xfsbdstrat(struct xfs_mount *, struct page_buf_s *);
extern int xfs_bdstrat_cb(struct page_buf_s *);
extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
xfs_fsize_t, xfs_fsize_t);
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -153,8 +153,7 @@ xfs_set_inodeops(
inode->i_mapping->a_ops = &linvfs_aops;
} else {
inode->i_op = &linvfs_file_inode_operations;
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
init_special_inode(inode, inode->i_mode, inode->i_rdev);
}
}
@@ -287,7 +286,7 @@ void
xfs_flush_buftarg(
xfs_buftarg_t *btp)
{
pagebuf_delwri_flush(btp, PBDF_WAIT, NULL);
pagebuf_delwri_flush(btp, 1, NULL);
}
void
@@ -448,7 +447,8 @@ linvfs_clear_inode(
#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
STATIC int
syncd(void *arg)
xfssyncd(
void *arg)
{
vfs_t *vfsp = (vfs_t *) arg;
int error;
@@ -480,20 +480,22 @@ syncd(void *arg)
}
STATIC int
linvfs_start_syncd(vfs_t *vfsp)
linvfs_start_syncd(
vfs_t *vfsp)
{
int pid;
int pid;
pid = kernel_thread(syncd, (void *) vfsp,
pid = kernel_thread(xfssyncd, (void *) vfsp,
CLONE_VM | CLONE_FS | CLONE_FILES);
if (pid < 0)
return pid;
return -pid;
wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
return 0;
}
STATIC void
linvfs_stop_syncd(vfs_t *vfsp)
linvfs_stop_syncd(
vfs_t *vfsp)
{
vfsp->vfs_flag |= VFS_UMOUNT;
wmb();
@@ -735,7 +737,7 @@ linvfs_fill_super(
struct vfs *vfsp = vfs_allocate();
struct xfs_mount_args *args = xfs_args_allocate(sb);
struct kstatfs statvfs;
int error;
int error, error2;
vfsp->vfs_super = sb;
LINVFS_SET_VFS(sb, vfsp);
@@ -776,11 +778,15 @@ linvfs_fill_super(
goto fail_unmount;
sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
if (!sb->s_root)
if (!sb->s_root) {
error = ENOMEM;
goto fail_vnrele;
if (is_bad_inode(sb->s_root->d_inode))
}
if (is_bad_inode(sb->s_root->d_inode)) {
error = EINVAL;
goto fail_vnrele;
if (linvfs_start_syncd(vfsp))
}
if ((error = linvfs_start_syncd(vfsp)))
goto fail_vnrele;
vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
@@ -796,7 +802,7 @@ linvfs_fill_super(
}
fail_unmount:
VFS_UNMOUNT(vfsp, 0, NULL, error);
VFS_UNMOUNT(vfsp, 0, NULL, error2);
fail_vfsop:
vfs_deallocate(vfsp);
@@ -112,7 +112,7 @@ extern void xfs_qm_exit(void);
struct xfs_inode;
struct xfs_mount;
struct pb_target;
struct xfs_buftarg;
struct block_device;
extern __uint64_t xfs_max_file_offset(unsigned int);
@@ -126,12 +126,12 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *,
struct block_device **);
extern void xfs_blkdev_put(struct block_device *);
extern struct pb_target *xfs_alloc_buftarg(struct block_device *);
extern void xfs_relse_buftarg(struct pb_target *);
extern void xfs_free_buftarg(struct pb_target *);
extern void xfs_flush_buftarg(struct pb_target *);
extern int xfs_readonly_buftarg(struct pb_target *);
extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int);
extern unsigned int xfs_getsize_buftarg(struct pb_target *);
extern struct xfs_buftarg *xfs_alloc_buftarg(struct block_device *);
extern void xfs_relse_buftarg(struct xfs_buftarg *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_flush_buftarg(struct xfs_buftarg *);
extern int xfs_readonly_buftarg(struct xfs_buftarg *);
extern void xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int, unsigned int);
extern unsigned int xfs_getsize_buftarg(struct xfs_buftarg *);
#endif /* __XFS_SUPER_H__ */
@@ -111,7 +111,7 @@ posix_acl_xattr_to_xfs(
return EINVAL;
if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
return EINVAL;
return EOPNOTSUPP;
memset(dest, 0, sizeof(xfs_acl_t));
dest->acl_cnt = posix_acl_xattr_count(size);
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -224,12 +224,21 @@ xfs_dir2_leafn_add(
mp = dp->i_mount;
tp = args->trans;
leaf = bp->data;
/*
* Quick check just to make sure we are not going to index
* into other people's memory.
*/
if (index < 0)
return XFS_ERROR(EFSCORRUPTED);
/*
* If there are already the maximum number of leaf entries in
* the block and there are no stale entries, it won't fit and the
* caller will do a split.  If there are stale entries we'll do
* a compact.
*/
if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) {
if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT))
return XFS_ERROR(ENOSPC);
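
The two comments added above encode a small decision: a corrupt (negative) index is rejected outright, a full block with no stale entries forces the caller to split, and a full block that still has stale entries is compacted instead. A standalone restatement with hypothetical names and generic errnos, not the kernel code:

#include <errno.h>

struct leafn_hdr_sketch {
	int count;	/* entries currently in the block, live plus stale */
	int stale;	/* entries marked dead but not yet purged */
};

static int leafn_add_precheck(const struct leafn_hdr_sketch *hdr,
			      int index, int max_ents)
{
	if (index < 0)			/* the commit returns EFSCORRUPTED here */
		return EIO;		/* generic stand-in errno for the sketch */
	if (hdr->count == max_ents && hdr->stale == 0)
		return ENOSPC;		/* full and nothing reclaimable: caller splits */
	return 0;			/* fits now, or will once stale entries are compacted */
}
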
@@ -828,12 +837,24 @@ xfs_dir2_leafn_rebalance(
state->inleaf = !swap;
else
state->inleaf =
swap ^ (args->hashval < INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT));
swap ^ (blk1->index <= INT_GET(leaf1->hdr.count, ARCH_CONVERT));
/*
* Adjust the expected index for insertion.
*/
if (!state->inleaf)
blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT);
/*
* Finally, sanity check to make sure we are not returning a negative index.
*/
if(blk2->index < 0) {
state->inleaf = 1;
blk2->index = 0;
cmn_err(CE_ALERT,
"xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting orignal leaf: "
"blk1->index %d\n",
blk1->index);
}
}
/*
@@ -591,10 +591,11 @@ xfs_iomap_write_delay(
firstblock = NULLFSBLOCK;
/*
* roundup the allocation request to m_dalign boundary if file size
* is greater that 512K and we are allocating past the allocation eof
* Roundup the allocation request to a stripe unit (m_dalign) boundary
* if the file size is >= stripe unit size, and we are allocating past
* the allocation eof.
*/
if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) {
if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
int eof;
xfs_fileoff_t new_last_fsb;
new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
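
A standalone sketch of the rounding described by the new comment, using made-up geometry (4 KiB blocks, a 64-block stripe unit) instead of values read from the superblock; the real check also requires m_dalign to be set and the allocation to extend past the allocation EOF:

#include <stdio.h>
#include <stdint.h>

#define BLOCKSIZE_LOG	12	/* assumed 4 KiB filesystem blocks */
#define STRIPE_UNIT_FSB	64	/* assumed stripe unit, in filesystem blocks */

static uint64_t fsb_to_bytes(uint64_t fsb)	/* rough analogue of XFS_FSB_TO_B() */
{
	return fsb << BLOCKSIZE_LOG;
}

static uint64_t roundup_64(uint64_t x, uint64_t y)	/* round x up to a multiple of y */
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	uint64_t isize = 300 * 1024;	/* file size in bytes, already past one stripe unit */
	uint64_t last_fsb = 80;		/* block just past the requested delayed allocation */

	if (isize >= fsb_to_bytes(STRIPE_UNIT_FSB))
		last_fsb = roundup_64(last_fsb, STRIPE_UNIT_FSB);

	printf("allocation extended to end at block %llu\n",
	       (unsigned long long)last_fsb);
	return 0;
}
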
@@ -213,9 +213,9 @@ xfs_cleanup(void)
*/
STATIC int
xfs_start_flags(
struct vfs *vfs,
struct xfs_mount_args *ap,
struct xfs_mount *mp,
int ronly)
struct xfs_mount *mp)
{
/* Values are in BBs */
if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
@@ -305,7 +305,7 @@ xfs_start_flags(
* no recovery flag requires a read-only mount
*/
if (ap->flags & XFSMNT_NORECOVERY) {
if (!ronly) {
if (!(vfs->vfs_flag & VFS_RDONLY)) {
cmn_err(CE_WARN,
"XFS: tried to mount a FS read-write without recovery!");
return XFS_ERROR(EINVAL);
@@ -327,10 +327,12 @@ xfs_start_flags(
*/
STATIC int
xfs_finish_flags(
struct vfs *vfs,
struct xfs_mount_args *ap,
struct xfs_mount *mp,
int ronly)
struct xfs_mount *mp)
{
int ronly = (vfs->vfs_flag & VFS_RDONLY);
/* Fail a mount where the logbuf is smaller then the log stripe */
if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
if ((ap->logbufsize == -1) &&
@@ -420,7 +422,6 @@ xfs_mount(
struct bhv_desc *p;
struct xfs_mount *mp = XFS_BHVTOM(bhvp);
struct block_device *ddev, *logdev, *rtdev;
int ronly = (vfsp->vfs_flag & VFS_RDONLY);
int flags = 0, error;
ddev = vfsp->vfs_super->s_bdev;
@@ -472,13 +473,13 @@ xfs_mount(
/*
* Setup flags based on mount(2) options and then the superblock
*/
error = xfs_start_flags(args, mp, ronly);
error = xfs_start_flags(vfsp, args, mp);
if (error)
goto error;
error = xfs_readsb(mp);
if (error)
goto error;
error = xfs_finish_flags(args, mp, ronly);
error = xfs_finish_flags(vfsp, args, mp);
if (error) {
xfs_freesb(mp);
goto error;
@@ -636,8 +637,7 @@ xfs_mntupdate(
*/
do {
VFS_SYNC(vfsp, REMOUNT_READONLY_FLAGS, NULL, error);
pagebuf_delwri_flush(mp->m_ddev_targp, PBDF_WAIT,
&pincount);
pagebuf_delwri_flush(mp->m_ddev_targp, 1, &pincount);
if(0 == pincount) { delay(50); count++; }
} while (count < 2);
@@ -680,18 +680,12 @@ xfs_setattr(
* once it is a part of the transaction.
*/
if (mask & XFS_AT_SIZE) {
if (vap->va_size > ip->i_d.di_size) {
code = 0;
if (vap->va_size > ip->i_d.di_size)
code = xfs_igrow_start(ip, vap->va_size, credp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
} else if (vap->va_size <= ip->i_d.di_size) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
(xfs_fsize_t)vap->va_size);
code = 0;
} else {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
code = 0;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (!code)
code = xfs_itruncate_data(ip, vap->va_size);
if (code) {
ASSERT(tp == NULL);
lock_flags &= ~XFS_ILOCK_EXCL;