Commit c8636b90 authored by Linus Torvalds

Merge branch 'ufs-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull ufs fixes from Al Viro:
 "Fix assorted ufs bugs: a couple of deadlocks, fs corruption in
  truncate(), oopsen on tail unpacking and truncate when racing with
  vmscan, mild fs corruption (free blocks stats summary buggered, *BSD
  fsck would complain and fix), several instances of broken logics
  around reserved blocks (starting with "check almost never triggers
  when it should" and then there are issues with sufficiently large
  UFS2)"

[ Note: ufs hasn't gotten any loving in a long time, because nobody
  really seems to use it. These ufs fixes are triggered by people
  actually caring now, not some sudden influx of new bugs.  - Linus ]

* 'ufs-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ufs_truncate_blocks(): fix the case when size is in the last direct block
  ufs: more deadlock prevention on tail unpacking
  ufs: avoid grabbing ->truncate_mutex if possible
  ufs_get_locked_page(): make sure we have buffer_heads
  ufs: fix s_size/s_dsize users
  ufs: fix reserved blocks check
  ufs: make ufs_freespace() return signed
  ufs: fix logics in "ufs: make fsck -f happy"
parents ccd3d905 a8fad984
fs/ufs/balloc.c
@@ -400,11 +400,13 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        /*
         * There is not enough space for user on the device
         */
-       if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-               mutex_unlock(&UFS_SB(sb)->s_lock);
-               UFSD("EXIT (FAILED)\n");
-               return 0;
+       if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
+               if (!capable(CAP_SYS_RESOURCE)) {
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
+                       UFSD("EXIT (FAILED)\n");
+                       return 0;
+               }
        }

        if (goal >= uspi->s_size)
                goal = 0;
@@ -421,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                if (result) {
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
+                       *err = 0;
                        write_seqlock(&UFS_I(inode)->meta_lock);
                        ufs_cpu_to_data_ptr(sb, p, result);
-                       write_sequnlock(&UFS_I(inode)->meta_lock);
-                       *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max(UFS_I(inode)->i_lastfrag, fragment + count);
+                       write_sequnlock(&UFS_I(inode)->meta_lock);
                }
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
@@ -439,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        result = ufs_add_fragments(inode, tmp, oldcount, newcount);
        if (result) {
                *err = 0;
+               read_seqlock_excl(&UFS_I(inode)->meta_lock);
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
+               read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                mutex_unlock(&UFS_SB(sb)->s_lock);
@@ -474,16 +478,16 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
+               *err = 0;
                write_seqlock(&UFS_I(inode)->meta_lock);
                ufs_cpu_to_data_ptr(sb, p, result);
-               write_sequnlock(&UFS_I(inode)->meta_lock);
-               *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
-               mutex_unlock(&UFS_SB(sb)->s_lock);
+               write_sequnlock(&UFS_I(inode)->meta_lock);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
fs/ufs/inode.c
@@ -401,13 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
        u64 phys64 = 0;
        unsigned frag = fragment & uspi->s_fpbmask;

-       if (!create) {
-               phys64 = ufs_frag_map(inode, offsets, depth);
-               if (phys64)
-                       map_bh(bh_result, sb, phys64 + frag);
-               return 0;
-       }
+       phys64 = ufs_frag_map(inode, offsets, depth);
+       if (!create)
+               goto done;

+       if (phys64) {
+               if (fragment >= UFS_NDIR_FRAGMENT)
+                       goto done;
+               read_seqlock_excl(&UFS_I(inode)->meta_lock);
+               if (fragment < UFS_I(inode)->i_lastfrag) {
+                       read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+                       goto done;
+               }
+               read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+       }
        /* This code entered only while writing ....? */

        mutex_lock(&UFS_I(inode)->truncate_mutex);
@@ -451,6 +458,11 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
        }
        mutex_unlock(&UFS_I(inode)->truncate_mutex);
        return err;
+
+done:
+       if (phys64)
+               map_bh(bh_result, sb, phys64 + frag);
+       return 0;
 }

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -874,7 +886,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
        ctx->to = from + count;
 }

-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
@@ -1112,19 +1123,24 @@ static void ufs_truncate_blocks(struct inode *inode)
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned offsets[4];
-       int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+       int depth;
        int depth2;
        unsigned i;
        struct ufs_buffer_head *ubh[3];
        void *p;
        u64 block;

-       if (!depth)
-               return;
+       if (inode->i_size) {
+               sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
+               depth = ufs_block_to_path(inode, last, offsets);
+               if (!depth)
+                       return;
+       } else {
+               depth = 1;
+       }

-       /* find the last non-zero in offsets[] */
        for (depth2 = depth - 1; depth2; depth2--)
-               if (offsets[depth2])
+               if (offsets[depth2] != uspi->s_apb - 1)
                        break;

        mutex_lock(&ufsi->truncate_mutex);
@@ -1133,9 +1149,8 @@ static void ufs_truncate_blocks(struct inode *inode)
                offsets[0] = UFS_IND_BLOCK;
        } else {
                /* get the blocks that should be partially emptied */
-               p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+               p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
                for (i = 0; i < depth2; i++) {
-                       offsets[i]++; /* next branch is fully freed */
                        block = ufs_data_ptr_to_cpu(sb, p);
                        if (!block)
                                break;
@@ -1146,7 +1161,7 @@ static void ufs_truncate_blocks(struct inode *inode)
                                write_sequnlock(&ufsi->meta_lock);
                                break;
                        }
-                       p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+                       p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
                }
                while (i--)
                        free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
@@ -1161,7 +1176,9 @@ static void ufs_truncate_blocks(struct inode *inode)
                        free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
                }
        }
+       read_seqlock_excl(&ufsi->meta_lock);
        ufsi->i_lastfrag = DIRECT_FRAGMENT;
+       read_sequnlock_excl(&ufsi->meta_lock);
        mark_inode_dirty(inode);
        mutex_unlock(&ufsi->truncate_mutex);
 }
fs/ufs/super.c
@@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb)
        usb3 = ubh_get_usb_third(uspi);

        if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-            (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
+            (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
            mtype == UFS_MOUNT_UFSTYPE_UFS2) {
                /*we have statistic in different place, then usual*/
                uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
@@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb)
        usb2 = ubh_get_usb_second(uspi);
        usb3 = ubh_get_usb_third(uspi);

-       if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-            (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
-           mtype == UFS_MOUNT_UFSTYPE_UFS2) {
+       if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
                /*we have statistic in different place, then usual*/
                usb2->fs_un.fs_u2.cs_ndir =
                        cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
@@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb)
                        cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
                usb3->fs_un1.fs_u2.cs_nffree =
                        cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
-       } else {
-               usb1->fs_cstotal.cs_ndir =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
-               usb1->fs_cstotal.cs_nbfree =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
-               usb1->fs_cstotal.cs_nifree =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
-               usb1->fs_cstotal.cs_nffree =
-                       cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+               goto out;
        }
+
+       if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
+           (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
+               /* store stats in both old and new places */
+               usb2->fs_un.fs_u2.cs_ndir =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
+               usb2->fs_un.fs_u2.cs_nbfree =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
+               usb3->fs_un1.fs_u2.cs_nifree =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
+               usb3->fs_un1.fs_u2.cs_nffree =
+                       cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
+       }
+       usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
+       usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
+       usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
+       usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+out:
        ubh_mark_buffer_dirty(USPI_UBH(uspi));
        ufs_print_super_stuff(sb, usb1, usb2, usb3);
        UFSD("EXIT\n");
@@ -996,6 +1004,13 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                flags |= UFS_ST_SUN;
        }

+       if ((flags & UFS_ST_MASK) == UFS_ST_44BSD &&
+           uspi->s_postblformat == UFS_42POSTBLFMT) {
+               if (!silent)
+                       pr_err("this is not a 44bsd filesystem");
+               goto failed;
+       }
+
        /*
         * Check ufs magic number
         */
@@ -1143,8 +1158,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);

        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
-               uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
-               uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
+               uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
+               uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
        } else {
                uspi->s_size = fs32_to_cpu(sb, usb1->fs_size);
                uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize);
@@ -1193,6 +1208,9 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
        uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);

+       uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
+                                             uspi->s_minfree, 100);
+
        /*
         * Compute another frequently used values
         */
@@ -1382,19 +1400,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
        mutex_lock(&UFS_SB(sb)->s_lock);
        usb3 = ubh_get_usb_third(uspi);

-       if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+       if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                buf->f_type = UFS2_MAGIC;
-               buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
-       } else {
+       else
                buf->f_type = UFS_MAGIC;
-               buf->f_blocks = uspi->s_dsize;
-       }
-       buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-               uspi->cs_total.cs_nffree;
+
+       buf->f_blocks = uspi->s_dsize;
+       buf->f_bfree = ufs_freefrags(uspi);
        buf->f_ffree = uspi->cs_total.cs_nifree;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
-               ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+       buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks)
+               ? (buf->f_bfree - uspi->s_root_blocks) : 0;
        buf->f_files = uspi->s_ncg * uspi->s_ipg;
        buf->f_namelen = UFS_MAXNAMLEN;
        buf->f_fsid.val[0] = (u32)id;
fs/ufs/ufs_fs.h
@@ -733,10 +733,8 @@ struct ufs_sb_private_info {
        __u32   s_dblkno;       /* offset of first data after cg */
        __u32   s_cgoffset;     /* cylinder group offset in cylinder */
        __u32   s_cgmask;       /* used to calc mod fs_ntrak */
-       __u32   s_size;         /* number of blocks (fragments) in fs */
-       __u32   s_dsize;        /* number of data blocks in fs */
-       __u64   s_u2_size;      /* ufs2: number of blocks (fragments) in fs */
-       __u64   s_u2_dsize;     /* ufs2: number of data blocks in fs */
+       __u64   s_size;         /* number of blocks (fragments) in fs */
+       __u64   s_dsize;        /* number of data blocks in fs */
        __u32   s_ncg;          /* number of cylinder groups */
        __u32   s_bsize;        /* size of basic blocks */
        __u32   s_fsize;        /* size of fragments */
@@ -793,6 +791,7 @@ struct ufs_sb_private_info {
        __u32   s_maxsymlinklen;/* upper limit on fast symlinks' size */
        __s32   fs_magic;       /* filesystem magic */
        unsigned int s_dirblksize;
+       __u64   s_root_blocks;
 };

/*
fs/ufs/util.c
@@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
struct page *ufs_get_locked_page(struct address_space *mapping,
                                 pgoff_t index)
 {
-       struct page *page;
-
-       page = find_lock_page(mapping, index);
+       struct inode *inode = mapping->host;
+       struct page *page = find_lock_page(mapping, index);

        if (!page) {
                page = read_mapping_page(mapping, index, NULL);
@@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "read_mapping_page error: ino %lu, index: %lu\n",
                               mapping->host->i_ino, index);
-                       goto out;
+                       return page;
                }

                lock_page(page);
@@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                        /* Truncate got there first */
                        unlock_page(page);
                        put_page(page);
-                       page = NULL;
-                       goto out;
+                       return NULL;
                }

                if (!PageUptodate(page) || PageError(page)) {
@@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "can not read page: ino %lu, index: %lu\n",
-                              mapping->host->i_ino, index);
-                       page = ERR_PTR(-EIO);
+                              inode->i_ino, index);
+                       return ERR_PTR(-EIO);
                }
        }
-out:
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        return page;
 }
fs/ufs/util.h
@@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
 #define ubh_blkmap(ubh,begin,bit) \
        ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))

-/*
- * Determine the number of available frags given a
- * percentage to hold in reserve.
- */
 static inline u64
-ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
+ufs_freefrags(struct ufs_sb_private_info *uspi)
 {
        return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-               uspi->cs_total.cs_nffree -
-               (uspi->s_dsize * (percentreserved) / 100);
+               uspi->cs_total.cs_nffree;
 }

/*
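Note on the reserved-blocks changes above: the patch derives a single threshold, uspi->s_root_blocks, at mount time and then reuses it in both the allocator (ufs_new_fragments) and ufs_statfs. Below is a minimal standalone sketch of that arithmetic, not part of the patch: the constants are hypothetical, and mul_u64_u32_div()/ufs_blkstofrags() are replaced by a plain 64-bit multiply-and-divide and a multiply by fragments-per-block, which is what they compute.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values below are made-up examples, not taken from a real fs. */
	uint64_t s_dsize   = 1000000;	/* data fragments in the filesystem */
	uint32_t s_minfree = 5;		/* percent reserved for root */
	uint32_t s_fpb     = 8;		/* fragments per block */
	uint64_t cs_nbfree = 40000;	/* free whole blocks */
	uint64_t cs_nffree = 1234;	/* free leftover fragments */

	/* uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, uspi->s_minfree, 100); */
	uint64_t root_blocks = s_dsize * s_minfree / 100;

	/* ufs_freefrags(): ufs_blkstofrags(cs_nbfree) + cs_nffree */
	uint64_t free_frags = cs_nbfree * s_fpb + cs_nffree;

	/* ufs_new_fragments(): non-root allocation is refused at or below the reserve */
	int non_root_may_allocate = free_frags > root_blocks;

	/* ufs_statfs(): f_bavail is whatever free space remains above the reserve */
	uint64_t f_bavail = free_frags > root_blocks ? free_frags - root_blocks : 0;

	printf("reserve=%llu free=%llu non_root_may_allocate=%d bavail=%llu\n",
	       (unsigned long long)root_blocks, (unsigned long long)free_frags,
	       non_root_may_allocate, (unsigned long long)f_bavail);
	return 0;
}
```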