Commit c4755d16 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (48 commits)
  ext4: fix hot spins in mballoc after err_freebuddy and err_freemeta
  ext4: fix test ext_generic_write_end() copied return value
  ext3: fix test ext_generic_write_end() copied return value
  ext4: Move mballoc headers/structures to a separate header file mballoc.h
  ext4: cleanup for compiling mballoc with verification and debugging #defines
  ext4: don't use ext4_error in ext4_check_descriptors
  ext4: mark inode dirty after initializing the extent tree
  ext4: update ctime and mtime for truncate with extents.
  ext4: Don't do GFP_NOFS allocations after taking ext4_lock_group
  ext4: move headers out of include/linux
  ext4: fix wrong gfp type under transaction
  ext4: Fix hang on umount with quotas when journal is aborted
  ext4: Fix update of mtime and ctime on rename
  jbd2: replace remaining __FUNCTION__ occurrences
  ext4: replace remaining __FUNCTION__ occurrences
  jbd2: only create debugfs and stats entries if init is successful
  jbd2: fix kernel-doc notation
  jbd2: replace potentially false assertion with if block
  jbd2: eliminate duplicated code in revocation table init/destroy functions
  jbd2: tidy up revoke cache initialisation and destruction
  ...
parents c15a2434 f1fa3342
......@@ -35,6 +35,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
/*
* The pmd table for the upper-most set of pages.
......
......@@ -69,6 +69,7 @@ void __init m68k_setup_node(int node)
*/
void *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
void show_mem(void)
{
......
......@@ -282,3 +282,5 @@ EXPORT_SYMBOL(do_BUG);
/* Sun Power Management Idle Handler */
EXPORT_SYMBOL(pm_idle);
EXPORT_SYMBOL(empty_zero_page);
......@@ -160,6 +160,7 @@ extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;
struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);
unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
......
......@@ -1261,10 +1261,11 @@ static int ext3_ordered_write_end(struct file *file,
new_i_size = pos + copied;
if (new_i_size > EXT3_I(inode)->i_disksize)
EXT3_I(inode)->i_disksize = new_i_size;
copied = ext3_generic_write_end(file, mapping, pos, len, copied,
ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
if (copied < 0)
ret = copied;
copied = ret2;
if (ret2 < 0)
ret = ret2;
}
ret2 = ext3_journal_stop(handle);
if (!ret)
......@@ -1289,10 +1290,11 @@ static int ext3_writeback_write_end(struct file *file,
if (new_i_size > EXT3_I(inode)->i_disksize)
EXT3_I(inode)->i_disksize = new_i_size;
copied = ext3_generic_write_end(file, mapping, pos, len, copied,
ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
if (copied < 0)
ret = copied;
copied = ret2;
if (ret2 < 0)
ret = ret2;
ret2 = ext3_journal_stop(handle);
if (!ret)
......
......@@ -9,8 +9,8 @@
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"
......@@ -37,7 +37,7 @@ ext4_acl_from_disk(const void *value, size_t size)
return ERR_PTR(-EINVAL);
if (count == 0)
return NULL;
acl = posix_acl_alloc(count, GFP_KERNEL);
acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
for (n=0; n < count; n++) {
......@@ -91,7 +91,7 @@ ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
*size = ext4_acl_size(acl->a_count);
ext_acl = kmalloc(sizeof(ext4_acl_header) + acl->a_count *
sizeof(ext4_acl_entry), GFP_KERNEL);
sizeof(ext4_acl_entry), GFP_NOFS);
if (!ext_acl)
return ERR_PTR(-ENOMEM);
ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
......@@ -187,7 +187,7 @@ ext4_get_acl(struct inode *inode, int type)
}
retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
if (retval > 0) {
value = kmalloc(retval, GFP_KERNEL);
value = kmalloc(retval, GFP_NOFS);
if (!value)
return ERR_PTR(-ENOMEM);
retval = ext4_xattr_get(inode, name_index, "", value, retval);
......@@ -335,7 +335,7 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
if (error)
goto cleanup;
}
clone = posix_acl_clone(acl, GFP_KERNEL);
clone = posix_acl_clone(acl, GFP_NOFS);
error = -ENOMEM;
if (!clone)
goto cleanup;
......
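The four GFP_KERNEL to GFP_NOFS conversions above all sit on paths that can run with a journal handle open ("fix wrong gfp type under transaction" in the shortlog). A minimal illustrative sketch of the rule follows; the function name is hypothetical, only the allocation flags and journal types are real:

/* Illustrative sketch only: while a journal handle is open, allocations must
 * use GFP_NOFS so that direct reclaim cannot re-enter the filesystem and
 * block on the very transaction this task is part of. */
static int example_alloc_under_handle(handle_t *handle, size_t len)
{
	void *buf = kmalloc(len, GFP_NOFS);	/* GFP_KERNEL here could deadlock */

	if (!buf)
		return -ENOMEM;
	/* ... modify metadata under @handle using buf ... */
	kfree(buf);
	return 0;
}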
......@@ -15,12 +15,12 @@
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "group.h"
/*
* balloc.c contains the blocks allocation and deallocation routines
*/
......@@ -48,7 +48,6 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
ext4_group_t block_group, struct ext4_group_desc *gdp)
{
unsigned long start;
int bit, bit_max;
unsigned free_blocks, group_blocks;
struct ext4_sb_info *sbi = EXT4_SB(sb);
......@@ -59,7 +58,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
/* If checksum is bad mark all blocks used to prevent allocation
* essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"Checksum bad for group %lu\n", block_group);
gdp->bg_free_blocks_count = 0;
gdp->bg_free_inodes_count = 0;
......@@ -106,11 +105,12 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
free_blocks = group_blocks - bit_max;
if (bh) {
ext4_fsblk_t start;
for (bit = 0; bit < bit_max; bit++)
ext4_set_bit(bit, bh->b_data);
start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
le32_to_cpu(sbi->s_es->s_first_data_block);
start = ext4_group_first_block_no(sb, block_group);
/* Set bits for block and inode bitmaps, and inode table */
ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
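The hunk above swaps an open-coded first-block computation for ext4_group_first_block_no(). Reconstructed from the removed lines, the helper's semantics are roughly the following sketch; the authoritative definition lives in ext4's headers:

/* Sketch of the helper, reconstructed from the lines it replaces. */
static inline ext4_fsblk_t ext4_group_first_block_no(struct super_block *sb,
						     ext4_group_t block_group)
{
	return (ext4_fsblk_t)block_group * EXT4_BLOCKS_PER_GROUP(sb) +
		le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
}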
......@@ -235,7 +235,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
return 1;
err_out:
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"Invalid block bitmap - "
"block_group = %d, block = %llu",
block_group, bitmap_blk);
......@@ -264,7 +264,7 @@ read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
bitmap_blk = ext4_block_bitmap(sb, desc);
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %llu",
(int)block_group, (unsigned long long)bitmap_blk);
......@@ -281,7 +281,7 @@ read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
}
if (bh_submit_read(bh) < 0) {
put_bh(bh);
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %llu",
(int)block_group, (unsigned long long)bitmap_blk);
......@@ -360,7 +360,7 @@ static void __rsv_window_dump(struct rb_root *root, int verbose,
BUG();
}
#define rsv_window_dump(root, verbose) \
__rsv_window_dump((root), (verbose), __FUNCTION__)
__rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
......@@ -740,7 +740,7 @@ void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
bit + i, bitmap_bh->b_data)) {
jbd_unlock_bh_state(bitmap_bh);
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"bit already cleared for block %llu",
(ext4_fsblk_t)(block + i));
jbd_lock_bh_state(bitmap_bh);
......@@ -752,9 +752,7 @@ void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
jbd_unlock_bh_state(bitmap_bh);
spin_lock(sb_bgl_lock(sbi, block_group));
desc->bg_free_blocks_count =
cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
group_freed);
le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
spin_unlock(sb_bgl_lock(sbi, block_group));
percpu_counter_add(&sbi->s_freeblocks_counter, count);
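This and several later hunks replace the open-coded read-modify-write of little-endian on-disk counters with le16_add_cpu(). The two forms are equivalent; the sketch below (helper renamed _sketch to mark it as illustrative, the real definition is in the generic byteorder headers) shows what the call does:

/* Equivalent forms for bumping a __le16 on-disk counter by a CPU-order value:
 *
 *   desc->bg_free_blocks_count =
 *       cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + group_freed);
 *
 *   le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
 *
 * i.e. the helper behaves like: */
static inline void le16_add_cpu_sketch(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}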
......@@ -1798,7 +1796,7 @@ ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
if (ext4_test_bit(grp_alloc_blk+i,
bh2jh(bitmap_bh)->b_committed_data)) {
printk("%s: block was unexpectedly set in "
"b_committed_data\n", __FUNCTION__);
"b_committed_data\n", __func__);
}
}
}
......@@ -1823,8 +1821,7 @@ ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
spin_lock(sb_bgl_lock(sbi, group_no));
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
gdp->bg_free_blocks_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
le16_add_cpu(&gdp->bg_free_blocks_count, -num);
gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
spin_unlock(sb_bgl_lock(sbi, group_no));
percpu_counter_sub(&sbi->s_freeblocks_counter, num);
......
......@@ -9,7 +9,7 @@
#include <linux/buffer_head.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include "ext4.h"
#ifdef EXT4FS_DEBUG
......
......@@ -23,10 +23,10 @@
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ext4.h"
static unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
......@@ -42,7 +42,7 @@ const struct file_operations ext4_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = ext4_readdir, /* we take BKL. needed?*/
.ioctl = ext4_ioctl, /* BKL held */
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
#endif
......
/*
* linux/include/linux/ext4_fs.h
* ext4.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
......@@ -13,14 +13,13 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT4_FS_H
#define _LINUX_EXT4_FS_H
#ifndef _EXT4_H
#define _EXT4_H
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/magic.h>
#include <linux/ext4_fs_i.h>
#include "ext4_i.h"
/*
* The second extended filesystem constants/structures
......@@ -176,8 +175,7 @@ struct ext4_group_desc
#define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */
#ifdef __KERNEL__
#include <linux/ext4_fs_i.h>
#include <linux/ext4_fs_sb.h>
#include "ext4_sb.h"
#endif
/*
* Macro-instructions used to manage group descriptors
......@@ -231,6 +229,7 @@ struct ext4_group_desc
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
#define EXT4_EXT_MIGRATE 0x00100000 /* Inode is migrating */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
......@@ -1049,8 +1048,7 @@ extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
struct address_space *mapping, loff_t from);
/* ioctl.c */
extern int ext4_ioctl (struct inode *, struct file *, unsigned int,
unsigned long);
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl (struct file *, unsigned int, unsigned long);
/* migrate.c */
......@@ -1204,4 +1202,4 @@ extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
int extend_disksize);
#endif /* __KERNEL__ */
#endif /* _LINUX_EXT4_FS_H */
#endif /* _EXT4_H */
......@@ -16,10 +16,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
#ifndef _LINUX_EXT4_EXTENTS
#define _LINUX_EXT4_EXTENTS
#ifndef _EXT4_EXTENTS
#define _EXT4_EXTENTS
#include <linux/ext4_fs.h>
#include "ext4.h"
/*
* With AGGRESSIVE_TEST defined, the capacity of index/leaf blocks
......@@ -228,5 +228,5 @@ extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *,
extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
ext4_lblk_t *, ext4_fsblk_t *);
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
#endif /* _LINUX_EXT4_EXTENTS */
#endif /* _EXT4_EXTENTS */
/*
* linux/include/linux/ext4_fs_i.h
* ext4_i.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
......@@ -13,8 +13,8 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT4_FS_I
#define _LINUX_EXT4_FS_I
#ifndef _EXT4_I
#define _EXT4_I
#include <linux/rwsem.h>
#include <linux/rbtree.h>
......@@ -164,4 +164,4 @@ struct ext4_inode_info {
spinlock_t i_prealloc_lock;
};
#endif /* _LINUX_EXT4_FS_I */
#endif /* _EXT4_I */
......@@ -2,14 +2,14 @@
* Interface between ext4 and JBD
*/
#include <linux/ext4_jbd2.h>
#include "ext4_jbd2.h"
int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
struct buffer_head *bh)
{
int err = jbd2_journal_get_undo_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
ext4_journal_abort_handle(where, __func__, bh, handle, err);
return err;
}
......@@ -18,7 +18,7 @@ int __ext4_journal_get_write_access(const char *where, handle_t *handle,
{
int err = jbd2_journal_get_write_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
ext4_journal_abort_handle(where, __func__, bh, handle, err);
return err;
}
......@@ -27,7 +27,7 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
{
int err = jbd2_journal_forget(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
ext4_journal_abort_handle(where, __func__, bh, handle, err);
return err;
}
......@@ -36,7 +36,7 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
{
int err = jbd2_journal_revoke(handle, blocknr, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
ext4_journal_abort_handle(where, __func__, bh, handle, err);
return err;
}
......@@ -45,7 +45,7 @@ int __ext4_journal_get_create_access(const char *where,
{
int err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
ext4_journal_abort_handle(where, __func__, bh, handle, err);
return err;
}
......@@ -54,6 +54,6 @@ int __ext4_journal_dirty_metadata(const char *where,
{
int err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
ext4_journal_abort_handle(where, __func__, bh, handle, err);
return err;
}
/*
* linux/include/linux/ext4_jbd2.h
* ext4_jbd2.h
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
......@@ -12,12 +12,12 @@
* Ext4-specific journaling extensions.
*/
#ifndef _LINUX_EXT4_JBD2_H
#define _LINUX_EXT4_JBD2_H
#ifndef _EXT4_JBD2_H
#define _EXT4_JBD2_H
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include "ext4.h"
#define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal)
......@@ -228,4 +228,4 @@ static inline int ext4_should_writeback_data(struct inode *inode)
return 0;
}
#endif /* _LINUX_EXT4_JBD2_H */
#endif /* _EXT4_JBD2_H */
/*
* linux/include/linux/ext4_fs_sb.h
* ext4_sb.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
......@@ -13,8 +13,8 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT4_FS_SB
#define _LINUX_EXT4_FS_SB
#ifndef _EXT4_SB
#define _EXT4_SB
#ifdef __KERNEL__
#include <linux/timer.h>
......@@ -145,4 +145,4 @@ struct ext4_sb_info {
struct ext4_locality_group *s_locality_groups;
};
#endif /* _LINUX_EXT4_FS_SB */
#endif /* _EXT4_SB */
......@@ -21,8 +21,8 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
......@@ -129,7 +129,7 @@ const struct file_operations ext4_file_operations = {
.write = do_sync_write,
.aio_read = generic_file_aio_read,
.aio_write = ext4_file_write,
.ioctl = ext4_ioctl,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
#endif
......
......@@ -27,8 +27,8 @@
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include "ext4.h"
#include "ext4_jbd2.h"
/*
* akpm: A new design for ext4_sync_file().
......@@ -72,6 +72,9 @@ int ext4_sync_file(struct file * file, struct dentry *dentry, int datasync)
goto out;
}
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
goto out;
/*
* The VFS has written the file data. If the inode is unaltered
* then we need not start a commit.
......
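The added datasync check is a fast path for fdatasync(): a pure timestamp update marks the inode I_DIRTY_SYNC but not I_DIRTY_DATASYNC, so once the data pages are written there is nothing a datasync call still has to commit. An annotated restatement of the same test (the flag semantics are the assumption here):

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;	/* only timestamps changed; skip forcing a commit */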
......@@ -11,8 +11,8 @@
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/cryptohash.h>
#include "ext4.h"
#define DELTA 0x9E3779B9
......
......@@ -15,8 +15,6 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
......@@ -25,7 +23,8 @@
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"
......@@ -75,7 +74,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
/* If checksum is bad mark all blocks and inodes used to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
ext4_error(sb, __FUNCTION__, "Checksum bad for group %lu\n",
ext4_error(sb, __func__, "Checksum bad for group %lu\n",
block_group);
gdp->bg_free_blocks_count = 0;
gdp->bg_free_inodes_count = 0;
......@@ -223,11 +222,9 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
if (gdp) {
spin_lock(sb_bgl_lock(sbi, block_group));
gdp->bg_free_inodes_count = cpu_to_le16(
le16_to_cpu(gdp->bg_free_inodes_count) + 1);
le16_add_cpu(&gdp->bg_free_inodes_count, 1);
if (is_directory)
gdp->bg_used_dirs_count = cpu_to_le16(
le16_to_cpu(gdp->bg_used_dirs_count) - 1);
le16_add_cpu(&gdp->bg_used_dirs_count, -1);
gdp->bg_checksum = ext4_group_desc_csum(sbi,
block_group, gdp);
spin_unlock(sb_bgl_lock(sbi, block_group));
......@@ -588,7 +585,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
ino++;
if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
ino > EXT4_INODES_PER_GROUP(sb)) {
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"reserved inode or inode > inodes count - "
"block_group = %lu, inode=%lu", group,
ino + group * EXT4_INODES_PER_GROUP(sb));
......@@ -664,11 +661,9 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
}
gdp->bg_free_inodes_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
le16_add_cpu(&gdp->bg_free_inodes_count, -1);
if (S_ISDIR(mode)) {
gdp->bg_used_dirs_count =
cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
le16_add_cpu(&gdp->bg_used_dirs_count, 1);
}
gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
spin_unlock(sb_bgl_lock(sbi, group));
......@@ -744,21 +739,22 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
if (err)
goto fail_free_drop;
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_std_error(sb, err);
goto fail_free_drop;
}
if (test_opt(sb, EXTENTS)) {
/* set extent flag only for directory and file */
if (S_ISDIR(mode) || S_ISREG(mode)) {
/* set extent flag only for directory, file and normal symlink */
if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
ext4_ext_tree_init(handle, inode);
err = ext4_update_incompat_feature(handle, sb,
EXT4_FEATURE_INCOMPAT_EXTENTS);
if (err)
goto fail;
goto fail_free_drop;
}
}
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_std_error(sb, err);
goto fail_free_drop;
}
ext4_debug("allocating inode %lu\n", inode->i_ino);
......@@ -796,7 +792,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
ext4_warning(sb, __FUNCTION__,
ext4_warning(sb, __func__,
"bad orphan ino %lu! e2fsck was run?", ino);
goto error;
}
......@@ -805,7 +801,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh) {
ext4_warning(sb, __FUNCTION__,
ext4_warning(sb, __func__,
"inode bitmap error for orphan %lu", ino);
goto error;
}
......@@ -830,7 +826,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
err = PTR_ERR(inode);
inode = NULL;
bad_orphan:
ext4_warning(sb, __FUNCTION__,
ext4_warning(sb, __func__,
"bad orphan inode %lu! e2fsck was run?", ino);
printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
bit, (unsigned long long)bitmap_bh->b_blocknr,
......
......@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
......@@ -36,6 +35,7 @@
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
......@@ -93,7 +93,7 @@ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
BUFFER_TRACE(bh, "call ext4_journal_revoke");
err = ext4_journal_revoke(handle, blocknr, bh);
if (err)
ext4_abort(inode->i_sb, __FUNCTION__,
ext4_abort(inode->i_sb, __func__,
"error %d when attempting revoke", err);
BUFFER_TRACE(bh, "exit");
return err;
......@@ -985,6 +985,16 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
} else {
retval = ext4_get_blocks_handle(handle, inode, block,
max_blocks, bh, create, extend_disksize);
if (retval > 0 && buffer_new(bh)) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
~EXT4_EXT_MIGRATE;
}
}
up_write((&EXT4_I(inode)->i_data_sem));
return retval;
......@@ -1230,7 +1240,7 @@ int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
int err = jbd2_journal_dirty_data(handle, bh);
if (err)
ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
ext4_journal_abort_handle(__func__, __func__,
bh, handle, err);
return err;
}
......@@ -1301,10 +1311,11 @@ static int ext4_ordered_write_end(struct file *file,
new_i_size = pos + copied;
if (new_i_size > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = new_i_size;
copied = ext4_generic_write_end(file, mapping, pos, len, copied,
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
if (copied < 0)
ret = copied;
copied = ret2;
if (ret2 < 0)
ret = ret2;
}
ret2 = ext4_journal_stop(handle);
if (!ret)
......@@ -1329,10 +1340,11 @@ static int ext4_writeback_write_end(struct file *file,
if (new_i_size > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = new_i_size;
copied = ext4_generic_write_end(file, mapping, pos, len, copied,
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
if (copied < 0)
ret = copied;
copied = ret2;
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
......@@ -2501,12 +2513,10 @@ void ext4_truncate(struct inode *inode)
static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
unsigned long ino, struct ext4_iloc *iloc)
{
unsigned long desc, group_desc;
ext4_group_t block_group;
unsigned long offset;
ext4_fsblk_t block;
struct buffer_head *bh;
struct ext4_group_desc * gdp;
struct ext4_group_desc *gdp;
if (!ext4_valid_inum(sb, ino)) {
/*
......@@ -2518,22 +2528,10 @@ static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
}
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
if (block_group >= EXT4_SB(sb)->s_groups_count) {
ext4_error(sb,"ext4_get_inode_block","group >= groups count");
gdp = ext4_get_group_desc(sb, block_group, NULL);
if (!gdp)
return 0;
}
smp_rmb();
group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
bh = EXT4_SB(sb)->s_group_desc[group_desc];
if (!bh) {
ext4_error (sb, "ext4_get_inode_block",
"Descriptor not loaded");
return 0;
}
gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
desc * EXT4_DESC_SIZE(sb));
/*
* Figure out the offset within the block group inode table
*/
......@@ -2976,7 +2974,8 @@ static int ext4_do_update_inode(handle_t *handle,
if (ext4_inode_blocks_set(handle, raw_inode, ei))
goto out_brelse;
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
/* clear the migrate flag in the raw_inode */
raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_HURD))
raw_inode->i_file_acl_high =
......@@ -3374,7 +3373,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb, __FUNCTION__,
ext4_warning(inode->i_sb, __func__,
"Unable to expand inode %lu. Delete"
" some EAs or run e2fsck.",
inode->i_ino);
......@@ -3415,7 +3414,7 @@ void ext4_dirty_inode(struct inode *inode)
current_handle->h_transaction != handle->h_transaction) {
/* This task has a transaction open against a different fs */
printk(KERN_EMERG "%s: transactions do not match!\n",
__FUNCTION__);
__func__);
} else {
jbd_debug(5, "marking dirty. outer handle=%p\n",
current_handle);
......
......@@ -10,17 +10,17 @@
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/capability.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include "ext4_jbd2.h"
#include "ext4.h"
int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int flags;
unsigned short rsv_window_size;
......@@ -277,9 +277,6 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
#ifdef CONFIG_COMPAT
long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
int ret;
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
case EXT4_IOC32_GETFLAGS:
......@@ -319,9 +316,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
default:
return -ENOIOCTLCMD;
}
lock_kernel();
ret = ext4_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
unlock_kernel();
return ret;
return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
/*
* fs/ext4/mballoc.h
*
* Written by: Alex Tomas <alex@clusterfs.com>
*
*/
#ifndef _EXT4_MBALLOC_H
#define _EXT4_MBALLOC_H
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "group.h"
/*
* with AGGRESSIVE_CHECK allocator runs consistency checks over
* structures. these checks slow things down a lot
*/
#define AGGRESSIVE_CHECK__
/*
* with DOUBLE_CHECK defined mballoc creates persistent in-core
* bitmaps, maintains and uses them to check for double allocations
*/
#define DOUBLE_CHECK__
/*
*/
#define MB_DEBUG__
#ifdef MB_DEBUG
#define mb_debug(fmt, a...) printk(fmt, ##a)
#else
#define mb_debug(fmt, a...)
#endif
/*
* with EXT4_MB_HISTORY mballoc stores last N allocations in memory
* and you can monitor it in /proc/fs/ext4/<dev>/mb_history
*/
#define EXT4_MB_HISTORY
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
#define EXT4_MB_HISTORY_DISCARD 4 /* preallocation discarded */
#define EXT4_MB_HISTORY_FREE 8 /* free */
#define EXT4_MB_HISTORY_DEFAULT (EXT4_MB_HISTORY_ALLOC | \
EXT4_MB_HISTORY_PREALLOC)
/*
* How long mballoc can look for a best extent (in found extents)
*/
#define MB_DEFAULT_MAX_TO_SCAN 200
/*
* How long mballoc must look for a best extent
*/
#define MB_DEFAULT_MIN_TO_SCAN 10
/*
* How many groups mballoc will scan looking for the best chunk
*/
#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
/*
* with 'ext4_mb_stats' allocator will collect stats that will be
* shown at umount. The collecting costs though!
*/
#define MB_DEFAULT_STATS 1
/*
* files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
* by the stream allocator, whose purpose is to pack requests
* as close to each other as possible to produce smooth I/O traffic.
* We use locality group prealloc space for stream requests.
* This can be tuned via /proc/fs/ext4/<partition>/stream_req
*/
#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
/*
* for which requests use 2^N search using buddies
*/
#define MB_DEFAULT_ORDER2_REQS 2
/*
* default group prealloc size 512 blocks
*/
#define MB_DEFAULT_GROUP_PREALLOC 512
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
#ifdef EXT4_BB_MAX_BLOCKS
#undef EXT4_BB_MAX_BLOCKS
#endif
#define EXT4_BB_MAX_BLOCKS 30
struct ext4_free_metadata {
ext4_group_t group;
unsigned short num;
ext4_grpblk_t blocks[EXT4_BB_MAX_BLOCKS];
struct list_head list;
};
struct ext4_group_info {
unsigned long bb_state;
unsigned long bb_tid;
struct ext4_free_metadata *bb_md_cur;
unsigned short bb_first_free;
unsigned short bb_free;
unsigned short bb_fragments;
struct list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
void *bb_bitmap;
#endif
unsigned short bb_counters[];
};
#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
#define EXT4_GROUP_INFO_LOCKED_BIT 1
#define EXT4_MB_GRP_NEED_INIT(grp) \
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
struct ext4_prealloc_space {
struct list_head pa_inode_list;
struct list_head pa_group_list;
union {
struct list_head pa_tmp_list;
struct rcu_head pa_rcu;
} u;
spinlock_t pa_lock;
atomic_t pa_count;
unsigned pa_deleted;
ext4_fsblk_t pa_pstart; /* phys. block */
ext4_lblk_t pa_lstart; /* log. block */
unsigned short pa_len; /* len of preallocated chunk */
unsigned short pa_free; /* how many blocks are free */
unsigned short pa_linear; /* consumed in one direction
* strictly, for grp prealloc */
spinlock_t *pa_obj_lock;
struct inode *pa_inode; /* hack, for history only */
};
struct ext4_free_extent {
ext4_lblk_t fe_logical;
ext4_grpblk_t fe_start;
ext4_group_t fe_group;
int fe_len;
};
/*
* Locality group:
* we try to group all related changes together
* so that writeback can flush/allocate them together as well
*/
struct ext4_locality_group {
/* for allocator */
struct mutex lg_mutex; /* to serialize allocates */
struct list_head lg_prealloc_list;/* list of preallocations */
spinlock_t lg_prealloc_lock;
};
struct ext4_allocation_context {
struct inode *ac_inode;
struct super_block *ac_sb;
/* original request */
struct ext4_free_extent ac_o_ex;
/* goal request (after normalization) */
struct ext4_free_extent ac_g_ex;
/* the best found extent */
struct ext4_free_extent ac_b_ex;
/* copy of the best found extent taken before preallocation efforts */
struct ext4_free_extent ac_f_ex;
/* number of iterations done. we have to track to limit searching */
unsigned long ac_ex_scanned;
__u16 ac_groups_scanned;
__u16 ac_found;
__u16 ac_tail;
__u16 ac_buddy;
__u16 ac_flags; /* allocation hints */
__u8 ac_status;
__u8 ac_criteria;
__u8 ac_repeats;
__u8 ac_2order; /* if request is to allocate 2^N blocks and
* N > 0, the field stores N, otherwise 0 */
__u8 ac_op; /* operation, for history only */
struct page *ac_bitmap_page;
struct page *ac_buddy_page;
struct ext4_prealloc_space *ac_pa;
struct ext4_locality_group *ac_lg;
};
#define AC_STATUS_CONTINUE 1
#define AC_STATUS_FOUND 2
#define AC_STATUS_BREAK 3
struct ext4_mb_history {
struct ext4_free_extent orig; /* orig allocation */
struct ext4_free_extent goal; /* goal allocation */
struct ext4_free_extent result; /* result allocation */
unsigned pid;
unsigned ino;
__u16 found; /* how many extents have been found */
__u16 groups; /* how many groups have been scanned */
__u16 tail; /* what tail broke some buddy */
__u16 buddy; /* buddy the tail ^^^ broke */
__u16 flags;
__u8 cr:3; /* which phase the result extent was found at */
__u8 op:4;
__u8 merged:1;
};
struct ext4_buddy {
struct page *bd_buddy_page;
void *bd_buddy;
struct page *bd_bitmap_page;
void *bd_bitmap;
struct ext4_group_info *bd_info;
struct super_block *bd_sb;
__u16 bd_blkbits;
ext4_group_t bd_group;
};
#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
#ifndef EXT4_MB_HISTORY
static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
{
return;
}
#else
static void ext4_mb_store_history(struct ext4_allocation_context *ac);
#endif
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
static struct proc_dir_entry *proc_root_ext4;
struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
static void ext4_mb_free_committed_blocks(struct super_block *);
static void ext4_mb_return_to_preallocation(struct inode *inode,
struct ext4_buddy *e4b, sector_t block,
int count);
static void ext4_mb_put_pa(struct ext4_allocation_context *,
struct super_block *, struct ext4_prealloc_space *pa);
static int ext4_mb_init_per_dev_proc(struct super_block *sb);
static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
}
static inline void ext4_unlock_group(struct super_block *sb,
ext4_group_t group)
{
struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
}
static inline int ext4_is_group_locked(struct super_block *sb,
ext4_group_t group)
{
struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
&(grinfo->bb_state));
}
static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
struct ext4_free_extent *fex)
{
ext4_fsblk_t block;
block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
+ fex->fe_start
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
return block;
}
#endif
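ext4_lock_group()/ext4_unlock_group() above are bit spinlocks, which is the context for the shortlog entry "Don't do GFP_NOFS allocations after taking ext4_lock_group": even GFP_NOFS allocations may sleep, so they have to happen before the lock is taken. A minimal illustrative ordering (the helper function name is hypothetical; the locking helpers and struct are the ones defined above):

/* Illustrative ordering only: allocate first, then take the bit spinlock. */
static int example_queue_free_metadata(struct super_block *sb, ext4_group_t group)
{
	struct ext4_free_metadata *md;

	md = kmalloc(sizeof(*md), GFP_NOFS);	/* may sleep: do it unlocked */
	if (!md)
		return -ENOMEM;

	ext4_lock_group(sb, group);
	/* ... atomic-only work: bitmap/counter updates, queueing md ... */
	ext4_unlock_group(sb, group);
	return 0;
}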
......@@ -13,8 +13,8 @@
*/
#include <linux/module.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs_extents.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
/*
* The contiguous blocks details which can be
......@@ -339,7 +339,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* i_data field of the original inode
*/
retval = ext4_journal_extend(handle, 1);
if (retval != 0) {
if (retval) {
retval = ext4_journal_restart(handle, 1);
if (retval)
goto err_out;
......@@ -350,6 +350,18 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
i_data[2] = ei->i_data[EXT4_TIND_BLOCK];
down_write(&EXT4_I(inode)->i_data_sem);
/*
* if EXT4_EXT_MIGRATE is cleared a block allocation
* happened after we started the migrate. We need to
* fail the migrate
*/
if (!(EXT4_I(inode)->i_flags & EXT4_EXT_MIGRATE)) {
retval = -EAGAIN;
up_write(&EXT4_I(inode)->i_data_sem);
goto err_out;
} else
EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
~EXT4_EXT_MIGRATE;
/*
* We have the extent map build with the tmp inode.
* Now copy the i_data across
......@@ -508,6 +520,17 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
* switch the inode format to prevent read.
*/
mutex_lock(&(inode->i_mutex));
/*
* Even though we take i_mutex we can still cause block allocation
* via mmap write to holes. If we have allocated new blocks we fail
* migrate. New block allocation will clear EXT4_EXT_MIGRATE flag.
* The flag is updated with i_data_sem held to prevent racing with
* block allocation.
*/
down_read((&EXT4_I(inode)->i_data_sem));
EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags | EXT4_EXT_MIGRATE;
up_read((&EXT4_I(inode)->i_data_sem));
handle = ext4_journal_start(inode, 1);
ei = EXT4_I(inode);
......@@ -559,9 +582,15 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
* tmp_inode
*/
free_ext_block(handle, tmp_inode);
else
retval = ext4_ext_swap_inode_data(handle, inode,
tmp_inode);
else {
retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
if (retval)
/*
* if we fail to swap inode data free the extent
* details of the tmp inode
*/
free_ext_block(handle, tmp_inode);
}
/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
if (ext4_journal_extend(handle, 1) != 0)
......
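The migrate.c hunks above, together with the ext4_get_blocks_wrap change earlier in this merge, form a small handshake around EXT4_EXT_MIGRATE. The sketch below summarizes that protocol; the three function names are hypothetical, the field and lock names are the real ones from the diff, and the actual code is split across migrate.c and inode.c:

/* Simplified sketch of the EXT4_EXT_MIGRATE handshake in this series. */
static void migrate_begin(struct inode *inode)
{
	down_read(&EXT4_I(inode)->i_data_sem);
	EXT4_I(inode)->i_flags |= EXT4_EXT_MIGRATE;	/* migration in flight */
	up_read(&EXT4_I(inode)->i_data_sem);
}

/* Block allocator, already holding i_data_sem for writing: a new block
 * changes i_data's layout, so it invalidates a concurrent migration. */
static void migrate_invalidate_locked(struct inode *inode)
{
	EXT4_I(inode)->i_flags &= ~EXT4_EXT_MIGRATE;
}

/* Swap step, holding i_data_sem for writing: only commit the new extent map
 * if no allocation raced in since migrate_begin(). */
static int migrate_commit_locked(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXT_MIGRATE))
		return -EAGAIN;
	EXT4_I(inode)->i_flags &= ~EXT4_EXT_MIGRATE;
	/* ... copy the temporary inode's i_data into @inode ... */
	return 0;
}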
......@@ -28,14 +28,14 @@
#include <linux/pagemap.h>
#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "namei.h"
#include "xattr.h"
......@@ -57,10 +57,15 @@ static struct buffer_head *ext4_append(handle_t *handle,
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
if ((bh = ext4_bread(handle, inode, *block, 1, err))) {
bh = ext4_bread(handle, inode, *block, 1, err);
if (bh) {
inode->i_size += inode->i_sb->s_blocksize;
EXT4_I(inode)->i_disksize = inode->i_size;
ext4_journal_get_write_access(handle,bh);
*err = ext4_journal_get_write_access(handle, bh);
if (*err) {
brelse(bh);
bh = NULL;
}
}
return bh;
}
......@@ -348,7 +353,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
root->info.hash_version != DX_HASH_LEGACY) {
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"Unrecognised inode hash code %d",
root->info.hash_version);
brelse(bh);
......@@ -362,7 +367,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"Unimplemented inode hash flags: %#06x",
root->info.unused_flags);
brelse(bh);
......@@ -371,7 +376,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
}
if ((indirect = root->info.indirect_levels) > 1) {
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"Unimplemented inode hash depth: %#06x",
root->info.indirect_levels);
brelse(bh);
......@@ -384,7 +389,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
if (dx_get_limit(entries) != dx_root_limit(dir,
root->info.info_length)) {
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"dx entry: limit != root limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
......@@ -396,7 +401,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
{
count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) {
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"dx entry: no count or count > limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
......@@ -441,7 +446,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) {
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"dx entry: limit != node limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
......@@ -457,7 +462,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
}
fail:
if (*err == ERR_BAD_DX_DIR)
ext4_warning(dir->i_sb, __FUNCTION__,
ext4_warning(dir->i_sb, __func__,
"Corrupt dir inode %ld, running e2fsck is "
"recommended.", dir->i_ino);
return NULL;
......@@ -914,7 +919,7 @@ static struct buffer_head * ext4_find_entry (struct dentry *dentry,
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
ext4_error(sb, __FUNCTION__, "reading directory #%lu "
ext4_error(sb, __func__, "reading directory #%lu "
"offset %lu", dir->i_ino,
(unsigned long)block);
brelse(bh);
......@@ -1007,7 +1012,7 @@ static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
retval = ext4_htree_next_block(dir, hash, frame,
frames, NULL);
if (retval < 0) {
ext4_warning(sb, __FUNCTION__,
ext4_warning(sb, __func__,
"error reading index page in directory #%lu",
dir->i_ino);
*err = retval;
......@@ -1532,7 +1537,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
ext4_warning(sb, __FUNCTION__,
ext4_warning(sb, __func__,
"Directory index full!");
err = -ENOSPC;
goto cleanup;
......@@ -1860,11 +1865,11 @@ static int empty_dir (struct inode * inode)
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread (NULL, inode, 0, 0, &err))) {
if (err)
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"error %d reading directory #%lu offset 0",
err, inode->i_ino);
else
ext4_warning(inode->i_sb, __FUNCTION__,
ext4_warning(inode->i_sb, __func__,
"bad directory (dir #%lu) - no data block",
inode->i_ino);
return 1;
......@@ -1893,7 +1898,7 @@ static int empty_dir (struct inode * inode)
offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
if (err)
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"error %d reading directory"
" #%lu offset %lu",
err, inode->i_ino, offset);
......@@ -2217,6 +2222,8 @@ static int ext4_symlink (struct inode * dir,
goto out_stop;
}
} else {
/* clear the extent format for fast symlink */
EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
inode->i_op = &ext4_fast_symlink_inode_operations;
memcpy((char*)&EXT4_I(inode)->i_data,symname,l);
inode->i_size = l-1;
......@@ -2347,6 +2354,9 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
EXT4_FEATURE_INCOMPAT_FILETYPE))
new_de->file_type = old_de->file_type;
new_dir->i_version++;
new_dir->i_ctime = new_dir->i_mtime =
ext4_current_time(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata");
ext4_journal_dirty_metadata(handle, new_bh);
brelse(new_bh);
......
......@@ -21,8 +21,6 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
......@@ -38,9 +36,10 @@
#include <linux/seq_file.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <asm/uaccess.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "namei.h"
......@@ -135,7 +134,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
* take the FS itself readonly cleanly. */
journal = EXT4_SB(sb)->s_journal;
if (is_journal_aborted(journal)) {
ext4_abort(sb, __FUNCTION__,
ext4_abort(sb, __func__,
"Detected aborted journal");
return ERR_PTR(-EROFS);
}
......@@ -355,7 +354,7 @@ void ext4_update_dynamic_rev(struct super_block *sb)
if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
return;
ext4_warning(sb, __FUNCTION__,
ext4_warning(sb, __func__,
"updating to rev %d because of new feature flag, "
"running e2fsck is recommended",
EXT4_DYNAMIC_REV);
......@@ -945,8 +944,8 @@ static match_table_t tokens = {
{Opt_mballoc, "mballoc"},
{Opt_nomballoc, "nomballoc"},
{Opt_stripe, "stripe=%u"},
{Opt_err, NULL},
{Opt_resize, "resize"},
{Opt_err, NULL},
};
static ext4_fsblk_t get_sb_block(void **data)
......@@ -1388,11 +1387,11 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
* a plain journaled filesystem we can keep it set as
* valid forever! :)
*/
es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT4_VALID_FS);
es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
#endif
if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
le16_add_cpu(&es->s_mnt_count, 1);
es->s_mtime = cpu_to_le32(get_seconds());
ext4_update_dynamic_rev(sb);
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
......@@ -1485,33 +1484,30 @@ static int ext4_check_descriptors(struct super_block *sb)
block_bitmap = ext4_block_bitmap(sb, gdp);
if (block_bitmap < first_block || block_bitmap > last_block)
{
ext4_error (sb, "ext4_check_descriptors",
"Block bitmap for group %lu"
" not in group (block %llu)!",
i, block_bitmap);
printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
"Block bitmap for group %lu not in group "
"(block %llu)!", i, block_bitmap);
return 0;
}
inode_bitmap = ext4_inode_bitmap(sb, gdp);
if (inode_bitmap < first_block || inode_bitmap > last_block)
{
ext4_error (sb, "ext4_check_descriptors",
"Inode bitmap for group %lu"
" not in group (block %llu)!",
i, inode_bitmap);
printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
"Inode bitmap for group %lu not in group "
"(block %llu)!", i, inode_bitmap);
return 0;
}
inode_table = ext4_inode_table(sb, gdp);
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block)
{
ext4_error (sb, "ext4_check_descriptors",
"Inode table for group %lu"
" not in group (block %llu)!",
i, inode_table);
printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
"Inode table for group %lu not in group "
"(block %llu)!", i, inode_table);
return 0;
}
if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
ext4_error(sb, __FUNCTION__,
printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
"Checksum for group %lu failed (%u!=%u)\n",
i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
gdp)), le16_to_cpu(gdp->bg_checksum));
......@@ -1594,8 +1590,8 @@ static void ext4_orphan_cleanup (struct super_block * sb,
while (es->s_last_orphan) {
struct inode *inode;
if (!(inode =
ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) {
inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
if (IS_ERR(inode)) {
es->s_last_orphan = 0;
break;
}
......@@ -1605,7 +1601,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %Ld bytes\n",
__FUNCTION__, inode->i_ino, inode->i_size);
__func__, inode->i_ino, inode->i_size);
jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
inode->i_ino, inode->i_size);
ext4_truncate(inode);
......@@ -1613,7 +1609,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
} else {
printk(KERN_DEBUG
"%s: deleting unreferenced inode %lu\n",
__FUNCTION__, inode->i_ino);
__func__, inode->i_ino);
jbd_debug(2, "deleting unreferenced inode %lu\n",
inode->i_ino);
nr_orphans++;
......@@ -2699,9 +2695,9 @@ static void ext4_clear_journal_err(struct super_block * sb,
char nbuf[16];
errstr = ext4_decode_error(sb, j_errno, nbuf);
ext4_warning(sb, __FUNCTION__, "Filesystem error recorded "
ext4_warning(sb, __func__, "Filesystem error recorded "
"from previous mount: %s", errstr);
ext4_warning(sb, __FUNCTION__, "Marking fs in need of "
ext4_warning(sb, __func__, "Marking fs in need of "
"filesystem check.");
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
......@@ -2828,7 +2824,7 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
}
if (sbi->s_mount_opt & EXT4_MOUNT_ABORT)
ext4_abort(sb, __FUNCTION__, "Abort forced by user");
ext4_abort(sb, __func__, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
......@@ -3040,8 +3036,14 @@ static int ext4_dquot_drop(struct inode *inode)
/* We may delete quota structure so we need to reserve enough blocks */
handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
if (IS_ERR(handle))
if (IS_ERR(handle)) {
/*
* We call dquot_drop() anyway to at least release references
* to quota structures so that umount does not hang.
*/
dquot_drop(inode);
return PTR_ERR(handle);
}
ret = dquot_drop(inode);
err = ext4_journal_stop(handle);
if (!ret)
......
......@@ -19,8 +19,8 @@
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/namei.h>
#include "ext4.h"
#include "xattr.h"
static void * ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
......
......@@ -53,11 +53,11 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"
......@@ -92,6 +92,8 @@ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
struct ext4_xattr_entry *);
static int ext4_xattr_list(struct inode *inode, char *buffer,
size_t buffer_size);
static struct mb_cache *ext4_xattr_cache;
......@@ -225,7 +227,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) {
bad_block: ext4_error(inode->i_sb, __FUNCTION__,
bad_block: ext4_error(inode->i_sb, __func__,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
error = -EIO;
......@@ -367,7 +369,7 @@ ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) {
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
error = -EIO;
......@@ -420,7 +422,7 @@ ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
* Returns a negative error number on failure, or the number of bytes
* used / required on success.
*/
int
static int
ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
int i_error, b_error;
......@@ -484,8 +486,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
get_bh(bh);
ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
} else {
BHDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(BHDR(bh)->h_refcount) - 1);
le32_add_cpu(&BHDR(bh)->h_refcount, -1);
error = ext4_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
......@@ -660,7 +661,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
if (ext4_xattr_check_block(bs->bh)) {
ext4_error(sb, __FUNCTION__,
ext4_error(sb, __func__,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
error = -EIO;
......@@ -738,7 +739,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
ce = NULL;
}
ea_bdebug(bs->bh, "cloning");
s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
error = -ENOMEM;
if (s->base == NULL)
goto cleanup;
......@@ -750,7 +751,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
}
} else {
/* Allocate a buffer where we construct the new block. */
s->base = kzalloc(sb->s_blocksize, GFP_KERNEL);
s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
/* assert(header == s->base) */
error = -ENOMEM;
if (s->base == NULL)
......@@ -789,8 +790,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
if (error)
goto cleanup_dquot;
lock_buffer(new_bh);
BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
le32_to_cpu(BHDR(new_bh)->h_refcount));
le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
ea_bdebug(new_bh, "reusing; refcount now=%d",
le32_to_cpu(BHDR(new_bh)->h_refcount));
unlock_buffer(new_bh);
......@@ -808,10 +808,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
get_bh(new_bh);
} else {
/* We need to allocate a new block */
ext4_fsblk_t goal = le32_to_cpu(
EXT4_SB(sb)->s_es->s_first_data_block) +
(ext4_fsblk_t)EXT4_I(inode)->i_block_group *
EXT4_BLOCKS_PER_GROUP(sb);
ext4_fsblk_t goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
ext4_fsblk_t block = ext4_new_block(handle, inode,
goal, &error);
if (error)
......@@ -863,7 +861,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
goto cleanup;
bad_block:
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
goto cleanup;
......@@ -1166,7 +1164,7 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
if (!bh)
goto cleanup;
if (ext4_xattr_check_block(bh)) {
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
error = -EIO;
......@@ -1341,14 +1339,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
goto cleanup;
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
if (!bh) {
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"inode %lu: block %llu read error", inode->i_ino,
EXT4_I(inode)->i_file_acl);
goto cleanup;
}
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1)) {
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl);
goto cleanup;
......@@ -1475,7 +1473,7 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
}
bh = sb_bread(inode->i_sb, ce->e_block);
if (!bh) {
ext4_error(inode->i_sb, __FUNCTION__,
ext4_error(inode->i_sb, __func__,
"inode %lu: block %lu read error",
inode->i_ino, (unsigned long) ce->e_block);
} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
......
......@@ -74,7 +74,6 @@ extern struct xattr_handler ext4_xattr_security_handler;
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
extern int ext4_xattr_list(struct inode *, char *, size_t);
extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
......@@ -98,12 +97,6 @@ ext4_xattr_get(struct inode *inode, int name_index, const char *name,
return -EOPNOTSUPP;
}
static inline int
ext4_xattr_list(struct inode *inode, void *buffer, size_t size)
{
return -EOPNOTSUPP;
}
static inline int
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
const void *value, size_t size, int flags)
......
......@@ -6,9 +6,9 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/security.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
static size_t
......
......@@ -9,8 +9,8 @@
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#define XATTR_TRUSTED_PREFIX "trusted."
......
......@@ -8,8 +8,8 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#define XATTR_USER_PREFIX "user."
......
......@@ -519,22 +519,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd_debug (3, "JBD: commit phase 2\n");
/*
* First, drop modified flag: all accesses to the buffers
* will be tracked for a new trasaction only -bzzz
*/
spin_lock(&journal->j_list_lock);
if (commit_transaction->t_buffers) {
new_jh = jh = commit_transaction->t_buffers->b_tnext;
do {
J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
new_jh->b_modified == 0);
new_jh->b_modified = 0;
new_jh = new_jh->b_tnext;
} while (new_jh != jh);
}
spin_unlock(&journal->j_list_lock);
/*
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
......@@ -584,6 +568,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
stats.u.run.rs_blocks_logged = 0;
J_ASSERT(commit_transaction->t_nr_buffers <=
commit_transaction->t_outstanding_credits);
descriptor = NULL;
bufs = 0;
while (commit_transaction->t_buffers) {
......
......@@ -534,7 +534,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
"%s: error: j_commit_request=%d, tid=%d\n",
__FUNCTION__, journal->j_commit_request, tid);
__func__, journal->j_commit_request, tid);
}
spin_unlock(&journal->j_state_lock);
#endif
......@@ -599,7 +599,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
printk(KERN_ALERT "%s: journal block not found "
"at offset %lu on %s\n",
__FUNCTION__,
__func__,
blocknr,
bdevname(journal->j_dev, b));
err = -EIO;
......@@ -997,13 +997,14 @@ static journal_t * journal_init_common (void)
*/
/**
* journal_t * jbd2_journal_init_dev() - creates an initialises a journal structure
* journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure
* @bdev: Block device on which to create the journal
* @fs_dev: Device which hold journalled filesystem for this journal.
* @start: Block nr Start of journal.
* @len: Length of the journal in blocks.
* @blocksize: blocksize of journalling device
* @returns: a newly created journal_t *
*
* Returns: a newly created journal_t *
*
* jbd2_journal_init_dev creates a journal which maps a fixed contiguous
* range of blocks on an arbitrary block device.
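The hunk above fixes the kernel-doc block: "@returns:" is not a parameter of the function, so the return information is moved into an ordinary "Returns:" sentence in the description. A rough sketch of the resulting comment shape, on a made-up function:

```c
#include <stdlib.h>

/**
 * example_alloc_buffer() - allocate a scratch buffer
 * @size: number of bytes to reserve
 *
 * Returns: a newly allocated buffer, or NULL if the allocation failed.
 * (Return information lives in the free-form description; an
 * "@returns:" line would be treated as a parameter that does not exist.)
 */
static void *example_alloc_buffer(size_t size)
{
	return malloc(size);
}

int main(void)
{
	void *buf = example_alloc_buffer(32);

	free(buf);
	return 0;
}
```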
......@@ -1027,7 +1028,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
__FUNCTION__);
__func__);
kfree(journal);
journal = NULL;
goto out;
......@@ -1083,7 +1084,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
__FUNCTION__);
__func__);
kfree(journal);
return NULL;
}
......@@ -1092,7 +1093,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
/* If that failed, give up */
if (err) {
printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
__FUNCTION__);
__func__);
kfree(journal);
return NULL;
}
......@@ -1178,7 +1179,7 @@ int jbd2_journal_create(journal_t *journal)
*/
printk(KERN_EMERG
"%s: creation of journal on external device!\n",
__FUNCTION__);
__func__);
BUG();
}
......@@ -1976,9 +1977,10 @@ static int journal_init_jbd2_journal_head_cache(void)
static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
{
J_ASSERT(jbd2_journal_head_cache != NULL);
if (jbd2_journal_head_cache) {
kmem_cache_destroy(jbd2_journal_head_cache);
jbd2_journal_head_cache = NULL;
}
}
/*
......@@ -1997,7 +1999,7 @@ static struct journal_head *journal_alloc_journal_head(void)
jbd_debug(1, "out of memory for journal_head\n");
if (time_after(jiffies, last_warning + 5*HZ)) {
printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
__FUNCTION__);
__func__);
last_warning = jiffies;
}
while (!ret) {
......@@ -2134,13 +2136,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
if (jh->b_frozen_data) {
printk(KERN_WARNING "%s: freeing "
"b_frozen_data\n",
__FUNCTION__);
__func__);
jbd2_free(jh->b_frozen_data, bh->b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing "
"b_committed_data\n",
__FUNCTION__);
__func__);
jbd2_free(jh->b_committed_data, bh->b_size);
}
bh->b_private = NULL;
......@@ -2305,10 +2307,12 @@ static int __init journal_init(void)
BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
ret = journal_init_caches();
if (ret != 0)
jbd2_journal_destroy_caches();
if (ret == 0) {
jbd2_create_debugfs_entry();
jbd2_create_jbd_stats_proc_entry();
} else {
jbd2_journal_destroy_caches();
}
return ret;
}
......
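The journal_init() hunk reorders module initialisation so the debugfs and stats entries are only registered when the cache setup succeeded; on failure the partially created caches are torn down instead. A minimal userspace sketch of that pattern, with hypothetical stand-in functions:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the cache and debug-entry setup steps. */
static int init_caches(void)		{ return 0; /* 0 on success */ }
static void destroy_caches(void)	{ }
static void create_debug_entries(void)	{ puts("debug entries created"); }

/* Mirrors the reordered init: debug entries are only created when the
 * caches came up; otherwise whatever was set up so far is undone. */
static int journal_init(void)
{
	int ret = init_caches();

	if (ret == 0)
		create_debug_entries();
	else
		destroy_caches();
	return ret;
}

int main(void)
{
	return journal_init();
}
```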
......@@ -139,7 +139,7 @@ static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
oom:
if (!journal_oom_retry)
return -ENOMEM;
jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
yield();
goto repeat;
}
......@@ -167,138 +167,121 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
return NULL;
}
void jbd2_journal_destroy_revoke_caches(void)
{
if (jbd2_revoke_record_cache) {
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
}
if (jbd2_revoke_table_cache) {
kmem_cache_destroy(jbd2_revoke_table_cache);
jbd2_revoke_table_cache = NULL;
}
}
int __init jbd2_journal_init_revoke_caches(void)
{
J_ASSERT(!jbd2_revoke_record_cache);
J_ASSERT(!jbd2_revoke_table_cache);
jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
sizeof(struct jbd2_revoke_record_s),
0,
SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
NULL);
if (!jbd2_revoke_record_cache)
return -ENOMEM;
goto record_cache_failure;
jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
sizeof(struct jbd2_revoke_table_s),
0, SLAB_TEMPORARY, NULL);
if (!jbd2_revoke_table_cache) {
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
return -ENOMEM;
}
if (!jbd2_revoke_table_cache)
goto table_cache_failure;
return 0;
table_cache_failure:
jbd2_journal_destroy_revoke_caches();
record_cache_failure:
return -ENOMEM;
}
void jbd2_journal_destroy_revoke_caches(void)
{
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
kmem_cache_destroy(jbd2_revoke_table_cache);
jbd2_revoke_table_cache = NULL;
}
/* Initialise the revoke table for a given journal to a given size. */
int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
int shift, tmp;
int shift = 0;
int tmp = hash_size;
struct jbd2_revoke_table_s *table;
J_ASSERT (journal->j_revoke_table[0] == NULL);
table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!table)
goto out;
shift = 0;
tmp = hash_size;
while((tmp >>= 1UL) != 0UL)
shift++;
journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!journal->j_revoke_table[0])
return -ENOMEM;
journal->j_revoke = journal->j_revoke_table[0];
/* Check that the hash_size is a power of two */
J_ASSERT(is_power_of_2(hash_size));
journal->j_revoke->hash_size = hash_size;
journal->j_revoke->hash_shift = shift;
journal->j_revoke->hash_table =
table->hash_size = hash_size;
table->hash_shift = shift;
table->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!journal->j_revoke->hash_table) {
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
journal->j_revoke = NULL;
return -ENOMEM;
if (!table->hash_table) {
kmem_cache_free(jbd2_revoke_table_cache, table);
table = NULL;
goto out;
}
for (tmp = 0; tmp < hash_size; tmp++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
INIT_LIST_HEAD(&table->hash_table[tmp]);
journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
if (!journal->j_revoke_table[1]) {
kfree(journal->j_revoke_table[0]->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
return -ENOMEM;
out:
return table;
}
static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
{
int i;
struct list_head *hash_list;
for (i = 0; i < table->hash_size; i++) {
hash_list = &table->hash_table[i];
J_ASSERT(list_empty(hash_list));
}
journal->j_revoke = journal->j_revoke_table[1];
kfree(table->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, table);
}
/* Check that the hash_size is a power of two */
/* Initialise the revoke table for a given journal to a given size. */
int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
{
J_ASSERT(journal->j_revoke_table[0] == NULL);
J_ASSERT(is_power_of_2(hash_size));
journal->j_revoke->hash_size = hash_size;
journal->j_revoke->hash_shift = shift;
journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
if (!journal->j_revoke_table[0])
goto fail0;
journal->j_revoke->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!journal->j_revoke->hash_table) {
kfree(journal->j_revoke_table[0]->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
journal->j_revoke = NULL;
return -ENOMEM;
}
journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
if (!journal->j_revoke_table[1])
goto fail1;
for (tmp = 0; tmp < hash_size; tmp++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
journal->j_revoke = journal->j_revoke_table[1];
spin_lock_init(&journal->j_revoke_lock);
return 0;
}
/* Destoy a journal's revoke table. The table must already be empty! */
fail1:
jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
return -ENOMEM;
}
/* Destroy a journal's revoke table. The table must already be empty! */
void jbd2_journal_destroy_revoke(journal_t *journal)
{
struct jbd2_revoke_table_s *table;
struct list_head *hash_list;
int i;
table = journal->j_revoke_table[0];
if (!table)
return;
for (i=0; i<table->hash_size; i++) {
hash_list = &table->hash_table[i];
J_ASSERT (list_empty(hash_list));
}
kfree(table->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, table);
journal->j_revoke = NULL;
table = journal->j_revoke_table[1];
if (!table)
return;
for (i=0; i<table->hash_size; i++) {
hash_list = &table->hash_table[i];
J_ASSERT (list_empty(hash_list));
}
kfree(table->hash_table);
kmem_cache_free(jbd2_revoke_table_cache, table);
journal->j_revoke = NULL;
if (journal->j_revoke_table[0])
jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
if (journal->j_revoke_table[1])
jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
}
......
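The revoke.c hunks factor the per-table setup into jbd2_journal_init_revoke_table() and switch the init paths to goto-based unwinding, so each failure point only undoes what was already built. A simplified, userspace-only sketch of the helper's shape (stand-in types, plain malloc/calloc instead of the slab caches), including the loop that derives the hash shift from the power-of-two table size:

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for one revoke hash table. */
struct revoke_table {
	int hash_size;		/* number of buckets, must be a power of two */
	int hash_shift;		/* log2(hash_size), used to fold block numbers */
	void **buckets;
};

/* Compute the shift from the power-of-two size, allocate, and unwind
 * with goto on failure, mirroring the factored-out init helper. */
static struct revoke_table *init_revoke_table(int hash_size)
{
	struct revoke_table *t;
	int shift = 0, tmp = hash_size;

	while ((tmp >>= 1) != 0)
		shift++;

	t = malloc(sizeof(*t));
	if (!t)
		goto out;

	t->hash_size = hash_size;
	t->hash_shift = shift;
	t->buckets = calloc(hash_size, sizeof(*t->buckets));
	if (!t->buckets) {
		free(t);
		t = NULL;
	}
out:
	return t;
}

int main(void)
{
	struct revoke_table *t = init_revoke_table(256);

	if (!t)
		return 1;
	printf("size=%d shift=%d\n", t->hash_size, t->hash_shift);
	free(t->buckets);
	free(t);
	return 0;
}
```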
......@@ -617,6 +617,12 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
jh->b_next_transaction == transaction)
goto done;
/*
* this is the first time this transaction is touching this buffer,
* reset the modified flag
*/
jh->b_modified = 0;
/*
* If there is already a copy-out version of this buffer, then we don't
* need to make another one
......@@ -690,7 +696,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
if (!frozen_buffer) {
printk(KERN_EMERG
"%s: OOM for frozen_buffer\n",
__FUNCTION__);
__func__);
JBUFFER_TRACE(jh, "oom!");
error = -ENOMEM;
jbd_lock_bh_state(bh);
......@@ -829,9 +835,16 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == NULL) {
jh->b_transaction = transaction;
/* first access by this transaction */
jh->b_modified = 0;
JBUFFER_TRACE(jh, "file as BJ_Reserved");
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
} else if (jh->b_transaction == journal->j_committing_transaction) {
/* first access by this transaction */
jh->b_modified = 0;
JBUFFER_TRACE(jh, "set next transaction");
jh->b_next_transaction = transaction;
}
......@@ -901,7 +914,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
printk(KERN_EMERG "%s: No memory for committed data\n",
__FUNCTION__);
__func__);
err = -ENOMEM;
goto out;
}
......@@ -1230,6 +1243,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
BUFFER_TRACE(bh, "entry");
......@@ -1248,6 +1262,9 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
goto not_jbd;
}
/* keep track of wether or not this transaction modified us */
was_modified = jh->b_modified;
/*
* The buffer's going from the transaction, we must drop
* all references -bzzz
......@@ -1265,6 +1282,11 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
/*
* we only want to drop a reference if this transaction
* modified the buffer
*/
if (was_modified)
drop_reserve = 1;
/*
......@@ -1305,6 +1327,12 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
if (jh->b_next_transaction) {
J_ASSERT(jh->b_next_transaction == transaction);
jh->b_next_transaction = NULL;
/*
* only drop a reference if this transaction modified
* the buffer
*/
if (was_modified)
drop_reserve = 1;
}
}
......@@ -1434,7 +1462,8 @@ int jbd2_journal_stop(handle_t *handle)
return err;
}
/**int jbd2_journal_force_commit() - force any uncommitted transactions
/**
* int jbd2_journal_force_commit() - force any uncommitted transactions
* @journal: journal to force
*
* For synchronous operations: force any uncommitted transactions
......@@ -2077,7 +2106,7 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
__jbd2_journal_file_buffer(jh, jh->b_transaction,
was_dirty ? BJ_Metadata : BJ_Reserved);
jh->b_modified ? BJ_Metadata : BJ_Reserved);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty)
......
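Taken together, the transaction.c hunks make b_modified a per-transaction flag: it is cleared the first time a transaction gets access to a buffer, and jbd2_journal_forget() only hands back a reserved credit when this transaction had actually dirtied the buffer. A toy sketch of that credit accounting, with invented stand-in types rather than the jbd2 structures:

```c
#include <stdio.h>

struct buffer {
	int b_modified;		/* dirtied by the current transaction? */
};

struct transaction {
	int credits;		/* credits still reserved by the handle */
};

/* First access by a transaction: the modified flag starts clean. */
static void get_write_access(struct transaction *t, struct buffer *b)
{
	(void)t;
	b->b_modified = 0;
}

/* Dirtying the buffer for the first time consumes one reserved credit. */
static void mark_dirty(struct transaction *t, struct buffer *b)
{
	if (!b->b_modified) {
		b->b_modified = 1;
		t->credits--;
	}
}

/* Forget: only give the credit back if *this* transaction modified the
 * buffer, mirroring the was_modified checks in the hunks above. */
static void forget(struct transaction *t, struct buffer *b)
{
	if (b->b_modified)
		t->credits++;
	b->b_modified = 0;
}

int main(void)
{
	struct transaction t = { .credits = 4 };
	struct buffer b;

	get_write_access(&t, &b);
	forget(&t, &b);		/* never dirtied: no credit returned */
	printf("credits after clean forget: %d\n", t.credits);

	get_write_access(&t, &b);
	mark_dirty(&t, &b);
	forget(&t, &b);		/* dirtied then forgotten: credit returned */
	printf("credits after dirty forget: %d\n", t.credits);
	return 0;
}
```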