Commit cec99709 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull quota, reiserfs, UDF updates from Jan Kara:
 "Scalability improvements for quota, a few reiserfs fixes, and couple
  of misc cleanups (udf, ext2)"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  reiserfs: Fix use after free in journal teardown
  reiserfs: fix corruption introduced by balance_leaf refactor
  udf: avoid redundant memcpy when writing data in ICB
  fs/udf: re-use hex_asc_upper_{hi,lo} macros
  fs/quota: kernel-doc warning fixes
  udf: use linux/uaccess.h
  fs/ext2/super.c: Drop memory allocation cast
  quota: remove dqptr_sem
  quota: simplify remove_inode_dquot_ref()
  quota: avoid unnecessary dqget()/dqput() calls
  quota: protect Q_GETFMT by dqonoff_mutex
parents 8d2d441a 01777836
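
The centerpiece of the quota changes below is the removal of dqptr_sem: readers of an inode's dquot pointers now run under SRCU, while writers clear the pointers under dq_data_lock and then wait out all readers with synchronize_srcu() before dropping the last dquot references. A minimal sketch of that pattern, with hypothetical demo_* names and a toy struct object standing in for struct dquot (dquot_srcu and dq_data_lock in the diffs below are the real counterparts):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

struct object { int payload; };         /* hypothetical guarded data */

DEFINE_STATIC_SRCU(demo_srcu);          /* plays the role of dquot_srcu */
static DEFINE_SPINLOCK(demo_lock);      /* plays the role of dq_data_lock */
static struct object *shared_ptr;       /* plays the role of inode->i_dquot[] */

/* Read side: cheap, and any number of readers can run in parallel. */
static void demo_reader(void)
{
        int idx = srcu_read_lock(&demo_srcu);
        struct object *p = shared_ptr;  /* stays valid until the unlock */

        if (p)
                pr_info("payload=%d\n", p->payload);
        srcu_read_unlock(&demo_srcu, idx);
}

/* Update side: clear the pointer, wait out readers, then free. */
static void demo_teardown(void)
{
        struct object *old;

        spin_lock(&demo_lock);          /* serializes pointer set/clear */
        old = shared_ptr;
        shared_ptr = NULL;
        spin_unlock(&demo_lock);

        synchronize_srcu(&demo_srcu);   /* no reader can still see 'old' */
        kfree(old);
}

The payoff is on the read side: srcu_read_lock() scales across CPUs, whereas the old down_read(&dqptr_sem) bounced a shared cache line on every quota charge.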
@@ -161,7 +161,7 @@ static struct kmem_cache * ext2_inode_cachep;
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
         struct ext2_inode_info *ei;
-        ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
+        ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
         if (!ei)
                 return NULL;
         ei->i_block_alloc_info = NULL;
......
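
The ext2 cleanup above works because kmem_cache_alloc() returns void *, which converts implicitly to any object pointer type in C, so the cast is pure noise. Illustrative fragment (struct foo and foo_cachep are hypothetical):

        struct foo *f;

        f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);               /* preferred */
        f = (struct foo *)kmem_cache_alloc(foo_cachep, GFP_KERNEL); /* redundant cast */

Dropping such casts also keeps the compiler useful: if the allocator's declaration ever went missing, the cast would silence the pointer-from-integer warning that flags the problem.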
@@ -96,13 +96,16 @@
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
  *
- * Any operation working on dquots via inode pointers must hold dqptr_sem. If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Operations accessing dquots via inode pointers are protected by dquot_srcu.
+ * Reading the pointers needs srcu_read_lock(&dquot_srcu), and
+ * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
+ * inode and before dropping dquot references to avoid use of dquots after
+ * they are freed. dq_data_lock is used to serialize the pointer setting and
+ * clearing operations.
  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  * inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping dq_data_lock. This makes
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
@@ -116,21 +119,15 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
- *   dqio_mutex
+ *   dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
  * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
  */

 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
+DEFINE_STATIC_SRCU(dquot_srcu);

 void __quota_error(struct super_block *sb, const char *func,
                    const char *fmt, ...)
@@ -733,7 +730,6 @@ static struct shrinker dqcache_shrinker = {
 /*
  * Put reference to dquot
- * NOTE: If you change this function please check whether dqput_blocks() works right...
  */
 void dqput(struct dquot *dquot)
 {
@@ -962,47 +958,34 @@ static void add_dquot_ref(struct super_block *sb, int type)
 #endif
 }

-/*
- * Return 0 if dqput() won't block.
- * (note that 1 doesn't necessarily mean blocking)
- */
-static inline int dqput_blocks(struct dquot *dquot)
-{
-        if (atomic_read(&dquot->dq_count) <= 1)
-                return 1;
-        return 0;
-}
-
 /*
  * Remove references to dquots from inode and add dquot to list for freeing
  * if we have the last reference to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
  */
-static int remove_inode_dquot_ref(struct inode *inode, int type,
-                                  struct list_head *tofree_head)
+static void remove_inode_dquot_ref(struct inode *inode, int type,
+                                   struct list_head *tofree_head)
 {
         struct dquot *dquot = inode->i_dquot[type];

         inode->i_dquot[type] = NULL;
-        if (dquot) {
-                if (dqput_blocks(dquot)) {
-#ifdef CONFIG_QUOTA_DEBUG
-                        if (atomic_read(&dquot->dq_count) != 1)
-                                quota_error(inode->i_sb, "Adding dquot with "
-                                            "dq_count %d to dispose list",
-                                            atomic_read(&dquot->dq_count));
-#endif
-                        spin_lock(&dq_list_lock);
-                        /* As dquot must have currently users it can't be on
-                         * the free list... */
-                        list_add(&dquot->dq_free, tofree_head);
-                        spin_unlock(&dq_list_lock);
-                        return 1;
-                }
-                else
-                        dqput(dquot);   /* We have guaranteed we won't block */
+        if (!dquot)
+                return;
+
+        if (list_empty(&dquot->dq_free)) {
+                /*
+                 * The inode still has reference to dquot so it can't be in the
+                 * free list
+                 */
+                spin_lock(&dq_list_lock);
+                list_add(&dquot->dq_free, tofree_head);
+                spin_unlock(&dq_list_lock);
+        } else {
+                /*
+                 * Dquot is already in a list to put so we won't drop the last
+                 * reference here.
+                 */
+                dqput(dquot);
         }
-        return 0;
 }
/* /*
...@@ -1037,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type, ...@@ -1037,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type,
* We have to scan also I_NEW inodes because they can already * We have to scan also I_NEW inodes because they can already
* have quota pointer initialized. Luckily, we need to touch * have quota pointer initialized. Luckily, we need to touch
* only quota pointers and these have separate locking * only quota pointers and these have separate locking
* (dqptr_sem). * (dq_data_lock).
*/ */
spin_lock(&dq_data_lock);
if (!IS_NOQUOTA(inode)) { if (!IS_NOQUOTA(inode)) {
if (unlikely(inode_get_rsv_space(inode) > 0)) if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1; reserved = 1;
remove_inode_dquot_ref(inode, type, tofree_head); remove_inode_dquot_ref(inode, type, tofree_head);
} }
spin_unlock(&dq_data_lock);
} }
spin_unlock(&inode_sb_list_lock); spin_unlock(&inode_sb_list_lock);
#ifdef CONFIG_QUOTA_DEBUG #ifdef CONFIG_QUOTA_DEBUG
@@ -1061,9 +1046,8 @@ static void drop_dquot_ref(struct super_block *sb, int type)
         LIST_HEAD(tofree_head);

         if (sb->dq_op) {
-                down_write(&sb_dqopt(sb)->dqptr_sem);
                 remove_dquot_ref(sb, type, &tofree_head);
-                up_write(&sb_dqopt(sb)->dqptr_sem);
+                synchronize_srcu(&dquot_srcu);
                 put_dquot_list(&tofree_head);
         }
 }
@@ -1394,21 +1378,16 @@ static int dquot_active(const struct inode *inode)
 /*
  * Initialize quota pointers in inode
  *
- * We do things in a bit complicated way but by that we avoid calling
- * dqget() and thus filesystem callbacks under dqptr_sem.
- *
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
 static void __dquot_initialize(struct inode *inode, int type)
 {
-        int cnt;
+        int cnt, init_needed = 0;
         struct dquot *got[MAXQUOTAS];
         struct super_block *sb = inode->i_sb;
         qsize_t rsv;

-        /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
         if (!dquot_active(inode))
                 return;
@@ -1418,6 +1397,15 @@ static void __dquot_initialize(struct inode *inode, int type)
                 got[cnt] = NULL;
                 if (type != -1 && cnt != type)
                         continue;
+                /*
+                 * The i_dquot should have been initialized in most cases;
+                 * we check it without locking here to avoid unnecessary
+                 * dqget()/dqput() calls.
+                 */
+                if (inode->i_dquot[cnt])
+                        continue;
+                init_needed = 1;
+
                 switch (cnt) {
                 case USRQUOTA:
                         qid = make_kqid_uid(inode->i_uid);
@@ -1429,7 +1417,11 @@ static void __dquot_initialize(struct inode *inode, int type)
                 got[cnt] = dqget(sb, qid);
         }

-        down_write(&sb_dqopt(sb)->dqptr_sem);
+        /* All required i_dquot has been initialized */
+        if (!init_needed)
+                return;
+
+        spin_lock(&dq_data_lock);
         if (IS_NOQUOTA(inode))
                 goto out_err;
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1449,15 +1441,12 @@ static void __dquot_initialize(struct inode *inode, int type)
                          * did a write before quota was turned on
                          */
                         rsv = inode_get_rsv_space(inode);
-                        if (unlikely(rsv)) {
-                                spin_lock(&dq_data_lock);
+                        if (unlikely(rsv))
                                 dquot_resv_space(inode->i_dquot[cnt], rsv);
-                                spin_unlock(&dq_data_lock);
-                        }
                 }
         }
 out_err:
-        up_write(&sb_dqopt(sb)->dqptr_sem);
+        spin_unlock(&dq_data_lock);
         /* Drop unused references */
         dqput_all(got);
 }
@@ -1469,19 +1458,24 @@ void dquot_initialize(struct inode *inode)
 EXPORT_SYMBOL(dquot_initialize);

 /*
- * Release all quotas referenced by inode
+ * Release all quotas referenced by inode.
+ *
+ * This function is only called on inode free or when converting a file
+ * to a quota file; there are no other users of i_dquot in either case,
+ * so we needn't call synchronize_srcu() after clearing i_dquot.
  */
 static void __dquot_drop(struct inode *inode)
 {
         int cnt;
         struct dquot *put[MAXQUOTAS];

-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        spin_lock(&dq_data_lock);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 put[cnt] = inode->i_dquot[cnt];
                 inode->i_dquot[cnt] = NULL;
         }
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        spin_unlock(&dq_data_lock);
         dqput_all(put);
 }
@@ -1599,15 +1593,11 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
  */
 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 {
-        int cnt, ret = 0;
+        int cnt, ret = 0, index;
         struct dquot_warn warn[MAXQUOTAS];
         struct dquot **dquots = inode->i_dquot;
         int reserve = flags & DQUOT_SPACE_RESERVE;

-        /*
-         * First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex
-         */
         if (!dquot_active(inode)) {
                 inode_incr_space(inode, number, reserve);
                 goto out;
@@ -1616,7 +1606,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                 warn[cnt].w_type = QUOTA_NL_NOWARN;

-        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        index = srcu_read_lock(&dquot_srcu);
         spin_lock(&dq_data_lock);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 if (!dquots[cnt])
@@ -1643,7 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
                 goto out_flush_warn;
         mark_all_dquot_dirty(dquots);
 out_flush_warn:
-        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        srcu_read_unlock(&dquot_srcu, index);
         flush_warnings(warn);
 out:
         return ret;
@@ -1655,17 +1645,16 @@ EXPORT_SYMBOL(__dquot_alloc_space);
  */
 int dquot_alloc_inode(const struct inode *inode)
 {
-        int cnt, ret = 0;
+        int cnt, ret = 0, index;
         struct dquot_warn warn[MAXQUOTAS];
         struct dquot * const *dquots = inode->i_dquot;

-        /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
         if (!dquot_active(inode))
                 return 0;
         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                 warn[cnt].w_type = QUOTA_NL_NOWARN;
-        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+        index = srcu_read_lock(&dquot_srcu);
         spin_lock(&dq_data_lock);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 if (!dquots[cnt])
@@ -1685,7 +1674,7 @@ int dquot_alloc_inode(const struct inode *inode)
         spin_unlock(&dq_data_lock);
         if (ret == 0)
                 mark_all_dquot_dirty(dquots);
-        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        srcu_read_unlock(&dquot_srcu, index);
         flush_warnings(warn);
         return ret;
 }
@@ -1696,14 +1685,14 @@ EXPORT_SYMBOL(dquot_alloc_inode);
  */
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
-        int cnt;
+        int cnt, index;

         if (!dquot_active(inode)) {
                 inode_claim_rsv_space(inode, number);
                 return 0;
         }

-        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        index = srcu_read_lock(&dquot_srcu);
         spin_lock(&dq_data_lock);
         /* Claim reserved quotas to allocated quotas */
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1715,7 +1704,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
         inode_claim_rsv_space(inode, number);
         spin_unlock(&dq_data_lock);
         mark_all_dquot_dirty(inode->i_dquot);
-        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        srcu_read_unlock(&dquot_srcu, index);
         return 0;
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1725,14 +1714,14 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
  */
 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
 {
-        int cnt;
+        int cnt, index;

         if (!dquot_active(inode)) {
                 inode_reclaim_rsv_space(inode, number);
                 return;
         }

-        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        index = srcu_read_lock(&dquot_srcu);
         spin_lock(&dq_data_lock);
         /* Claim reserved quotas to allocated quotas */
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1744,7 +1733,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
         inode_reclaim_rsv_space(inode, number);
         spin_unlock(&dq_data_lock);
         mark_all_dquot_dirty(inode->i_dquot);
-        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        srcu_read_unlock(&dquot_srcu, index);
         return;
 }
 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
@@ -1757,16 +1746,14 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
         unsigned int cnt;
         struct dquot_warn warn[MAXQUOTAS];
         struct dquot **dquots = inode->i_dquot;
-        int reserve = flags & DQUOT_SPACE_RESERVE;
+        int reserve = flags & DQUOT_SPACE_RESERVE, index;

-        /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
         if (!dquot_active(inode)) {
                 inode_decr_space(inode, number, reserve);
                 return;
         }

-        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        index = srcu_read_lock(&dquot_srcu);
         spin_lock(&dq_data_lock);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 int wtype;
@@ -1789,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
                 goto out_unlock;
         mark_all_dquot_dirty(dquots);
 out_unlock:
-        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        srcu_read_unlock(&dquot_srcu, index);
         flush_warnings(warn);
 }
 EXPORT_SYMBOL(__dquot_free_space);
@@ -1802,13 +1789,12 @@ void dquot_free_inode(const struct inode *inode)
         unsigned int cnt;
         struct dquot_warn warn[MAXQUOTAS];
         struct dquot * const *dquots = inode->i_dquot;
+        int index;

-        /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
         if (!dquot_active(inode))
                 return;

-        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        index = srcu_read_lock(&dquot_srcu);
         spin_lock(&dq_data_lock);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 int wtype;
@@ -1823,7 +1809,7 @@ void dquot_free_inode(const struct inode *inode)
         }
         spin_unlock(&dq_data_lock);
         mark_all_dquot_dirty(dquots);
-        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        srcu_read_unlock(&dquot_srcu, index);
         flush_warnings(warn);
 }
 EXPORT_SYMBOL(dquot_free_inode);
@@ -1837,6 +1823,8 @@ EXPORT_SYMBOL(dquot_free_inode);
  * This operation can block, but only after everything is updated
  * A transaction must be started when entering this function.
  *
+ * We are holding a reference on transfer_from & transfer_to, so there is
+ * no need to protect them by srcu_read_lock().
  */
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 {
@@ -1849,8 +1837,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
         struct dquot_warn warn_from_inodes[MAXQUOTAS];
         struct dquot_warn warn_from_space[MAXQUOTAS];

-        /* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
         if (IS_NOQUOTA(inode))
                 return 0;
         /* Initialize the arrays */
@@ -1859,12 +1845,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
                 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
                 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
         }
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+        spin_lock(&dq_data_lock);
         if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
-                up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+                spin_unlock(&dq_data_lock);
                 return 0;
         }
-        spin_lock(&dq_data_lock);
         cur_space = inode_get_bytes(inode);
         rsv_space = inode_get_rsv_space(inode);
         space = cur_space + rsv_space;
@@ -1918,7 +1904,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
                 inode->i_dquot[cnt] = transfer_to[cnt];
         }
         spin_unlock(&dq_data_lock);
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);

         mark_all_dquot_dirty(transfer_from);
         mark_all_dquot_dirty(transfer_to);
@@ -1932,7 +1917,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
         return 0;
 over_quota:
         spin_unlock(&dq_data_lock);
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
         flush_warnings(warn_to);
         return ret;
 }
......
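
For filesystems the quota entry points keep their signatures; only the internal locking changed. A sketch of a typical consumer of this API, under the 3.16-era interface (demo_fs_alloc_blocks is hypothetical; the dquot_* and mark_inode_dirty calls are real):

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical block-allocation path of a quota-aware filesystem. */
static int demo_fs_alloc_blocks(struct inode *inode, unsigned int blocks)
{
        int ret;

        dquot_initialize(inode);        /* attach dquots to the inode */

        /* Charges all quota types under srcu_read_lock(&dquot_srcu). */
        ret = dquot_alloc_space_nodirty(inode,
                                        (qsize_t)blocks << inode->i_blkbits);
        if (ret)
                return ret;             /* typically -EDQUOT */

        /* ... allocate the blocks on disk ... */
        mark_inode_dirty(inode);
        return 0;
}

Because charging now happens under SRCU, many such paths can run concurrently on different CPUs instead of serializing on the superblock-wide dqptr_sem.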
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(qid_lt);
 /**
  * from_kqid - Create a qid from a kqid user-namespace pair.
  * @targ: The user namespace we want a qid in.
- * @kuid: The kernel internal quota identifier to start with.
+ * @kqid: The kernel internal quota identifier to start with.
  *
  * Map @kqid into the user-namespace specified by @targ and
  * return the resulting qid.
......
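
The fix above follows the usual kernel-doc rule: every @name line must match an actual parameter of the documented function, or scripts/kernel-doc emits a warning. A self-contained example of the expected shape (demo_add is hypothetical):

/**
 * demo_add - add two values
 * @a: first addend
 * @b: second addend
 *
 * Return: the sum of @a and @b.
 */
static int demo_add(int a, int b)
{
        return a + b;
}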
@@ -32,8 +32,7 @@ static struct genl_family quota_genl_family = {
 /**
  * quota_send_warning - Send warning to userspace about exceeded quota
- * @type: The quota type: USRQQUOTA, GRPQUOTA,...
- * @id: The user or group id of the quota that was exceeded
+ * @qid: The kernel internal quota identifier.
  * @dev: The device on which the fs is mounted (sb->s_dev)
  * @warntype: The type of the warning: QUOTA_NL_...
  *
......
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
 {
         __u32 fmt;

-        down_read(&sb_dqopt(sb)->dqptr_sem);
+        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
         if (!sb_has_quota_active(sb, type)) {
-                up_read(&sb_dqopt(sb)->dqptr_sem);
+                mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
                 return -ESRCH;
         }
         fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-        up_read(&sb_dqopt(sb)->dqptr_sem);
+        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
         if (copy_to_user(addr, &fmt, sizeof(fmt)))
                 return -EFAULT;
         return 0;
......
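
Holding dqonoff_mutex guarantees that quota cannot be turned off between the sb_has_quota_active() check and the dqi_format dereference. From userspace the operation is unchanged; a minimal caller of Q_GETFMT (the device path is a placeholder):

#include <stdio.h>
#include <stdint.h>
#include <sys/quota.h>

int main(void)
{
        uint32_t fmt;

        if (quotactl(QCMD(Q_GETFMT, USRQUOTA), "/dev/sda1", 0, (void *)&fmt))
                perror("quotactl(Q_GETFMT)");
        else
                printf("user quota format id: %#x\n", fmt);
        return 0;
}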
@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
         return 0;
 }

-static void balance_leaf_insert_left(struct tree_balance *tb,
-                                     struct item_head *ih, const char *body)
+static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
+                                             struct item_head *const ih,
+                                             const char * const body)
 {
         int ret;
         struct buffer_info bi;
         int n = B_NR_ITEMS(tb->L[0]);
+        unsigned body_shift_bytes = 0;

         if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
                 /* part of new item falls into L[0] */
@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
                 put_ih_item_len(ih, new_item_len);
                 if (tb->lbytes > tb->zeroes_num) {
-                        body += (tb->lbytes - tb->zeroes_num);
+                        body_shift_bytes = tb->lbytes - tb->zeroes_num;
                         tb->zeroes_num = 0;
                 } else
                         tb->zeroes_num -= tb->lbytes;
@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
                 tb->insert_size[0] = 0;
                 tb->zeroes_num = 0;
         }
+        return body_shift_bytes;
 }

 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
-                                                 struct item_head *ih,
-                                                 const char *body)
+                                                 struct item_head * const ih,
+                                                 const char * const body)
 {
         int n = B_NR_ITEMS(tb->L[0]);
         struct buffer_info bi;
@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
         tb->pos_in_item -= tb->lbytes;
 }

-static void balance_leaf_paste_left_shift(struct tree_balance *tb,
-                                          struct item_head *ih,
-                                          const char *body)
+static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
+                                                  struct item_head * const ih,
+                                                  const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         int n = B_NR_ITEMS(tb->L[0]);
         struct buffer_info bi;
+        int body_shift_bytes = 0;

         if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
                 balance_leaf_paste_left_shift_dirent(tb, ih, body);
-                return;
+                return 0;
         }

         RFALSE(tb->lbytes <= 0,
@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
          * insert_size[0]
          */
         if (l_n > tb->zeroes_num) {
-                body += (l_n - tb->zeroes_num);
+                body_shift_bytes = l_n - tb->zeroes_num;
                 tb->zeroes_num = 0;
         } else
                 tb->zeroes_num -= l_n;
@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
                  */
                 leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
         }
+        return body_shift_bytes;
 }

 /* appended item will be in L[0] in whole */
 static void balance_leaf_paste_left_whole(struct tree_balance *tb,
-                                          struct item_head *ih,
-                                          const char *body)
+                                          struct item_head * const ih,
+                                          const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         int n = B_NR_ITEMS(tb->L[0]);
@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb,
         tb->zeroes_num = 0;
 }

-static void balance_leaf_paste_left(struct tree_balance *tb,
-                                    struct item_head *ih, const char *body)
+static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
+                                            struct item_head * const ih,
+                                            const char * const body)
 {
         /* we must shift the part of the appended item */
         if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
-                balance_leaf_paste_left_shift(tb, ih, body);
+                return balance_leaf_paste_left_shift(tb, ih, body);
         else
                 balance_leaf_paste_left_whole(tb, ih, body);
+        return 0;
 }

 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
-static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
-                              const char *body, int flag)
+static unsigned int balance_leaf_left(struct tree_balance *tb,
+                                      struct item_head * const ih,
+                                      const char * const body, int flag)
 {
         if (tb->lnum[0] <= 0)
-                return;
+                return 0;

         /* new item or it part falls to L[0], shift it too */
         if (tb->item_pos < tb->lnum[0]) {
                 BUG_ON(flag != M_INSERT && flag != M_PASTE);

                 if (flag == M_INSERT)
-                        balance_leaf_insert_left(tb, ih, body);
+                        return balance_leaf_insert_left(tb, ih, body);
                 else /* M_PASTE */
-                        balance_leaf_paste_left(tb, ih, body);
+                        return balance_leaf_paste_left(tb, ih, body);
         } else
                 /* new item doesn't fall into L[0] */
                 leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+        return 0;
 }

 static void balance_leaf_insert_right(struct tree_balance *tb,
-                                      struct item_head *ih, const char *body)
+                                      struct item_head * const ih,
+                                      const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb,

 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
-                                     struct item_head *ih, const char *body)
+                                     struct item_head * const ih,
+                                     const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         struct buffer_info bi;
@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
 }

 static void balance_leaf_paste_right_shift(struct tree_balance *tb,
-                                     struct item_head *ih, const char *body)
+                                     struct item_head * const ih,
+                                     const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         int n_shift, n_rem, r_zeroes_number, version;
@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb,
 }

 static void balance_leaf_paste_right_whole(struct tree_balance *tb,
-                                     struct item_head *ih, const char *body)
+                                     struct item_head * const ih,
+                                     const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         int n = B_NR_ITEMS(tbS0);
@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
 }

 static void balance_leaf_paste_right(struct tree_balance *tb,
-                                     struct item_head *ih, const char *body)
+                                     struct item_head * const ih,
+                                     const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         int n = B_NR_ITEMS(tbS0);
@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb,
 }

 /* shift rnum[0] items from S[0] to the right neighbor R[0] */
-static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
-                               const char *body, int flag)
+static void balance_leaf_right(struct tree_balance *tb,
+                               struct item_head * const ih,
+                               const char * const body, int flag)
 {
         if (tb->rnum[0] <= 0)
                 return;
@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
-                                          struct item_head *ih,
-                                          const char *body,
+                                          struct item_head * const ih,
+                                          const char * const body,
                                           struct item_head *insert_key,
                                           struct buffer_head **insert_ptr,
                                           int i)
@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,

 /* we append to directory item */
 static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
-                                          struct item_head *ih,
-                                          const char *body,
+                                          struct item_head * const ih,
+                                          const char * const body,
                                           struct item_head *insert_key,
                                           struct buffer_head **insert_ptr,
                                           int i)
@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
-                                          struct item_head *ih,
-                                          const char *body,
+                                          struct item_head * const ih,
+                                          const char * const body,
                                           struct item_head *insert_key,
                                           struct buffer_head **insert_ptr,
                                           int i)
@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
-                                          struct item_head *ih,
-                                          const char *body,
+                                          struct item_head * const ih,
+                                          const char * const body,
                                           struct item_head *insert_key,
                                           struct buffer_head **insert_ptr,
                                           int i)
@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
-                                         struct item_head *ih,
-                                         const char *body,
+                                         struct item_head * const ih,
+                                         const char * const body,
                                          struct item_head *insert_key,
                                          struct buffer_head **insert_ptr,
                                          int i)
@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb,

 /* Fill new nodes that appear in place of S[0] */
 static void balance_leaf_new_nodes(struct tree_balance *tb,
-                                   struct item_head *ih,
-                                   const char *body,
+                                   struct item_head * const ih,
+                                   const char * const body,
                                    struct item_head *insert_key,
                                    struct buffer_head **insert_ptr,
                                    int flag)
@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb,
 }

 static void balance_leaf_finish_node_insert(struct tree_balance *tb,
-                                            struct item_head *ih,
-                                            const char *body)
+                                            struct item_head * const ih,
+                                            const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         struct buffer_info bi;
@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb,
 }

 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
-                                                  struct item_head *ih,
-                                                  const char *body)
+                                                  struct item_head * const ih,
+                                                  const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         struct item_head *pasted = item_head(tbS0, tb->item_pos);
@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
 }

 static void balance_leaf_finish_node_paste(struct tree_balance *tb,
-                                           struct item_head *ih,
-                                           const char *body)
+                                           struct item_head * const ih,
+                                           const char * const body)
 {
         struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
         struct buffer_info bi;
@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb,
  * of the affected item which remains in S
  */
 static void balance_leaf_finish_node(struct tree_balance *tb,
-                                     struct item_head *ih,
-                                     const char *body, int flag)
+                                     struct item_head * const ih,
+                                     const char * const body, int flag)
 {
         /* if we must insert or append into buffer S[0] */
         if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
             && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
                 tb->pos_in_item *= UNFM_P_SIZE;

-        balance_leaf_left(tb, ih, body, flag);
+        body += balance_leaf_left(tb, ih, body, flag);

         /* tb->lnum[0] > 0 */
         /* Calculate new item position */
......
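
The corruption fixed above is a pass-by-value pitfall: the refactored helpers advanced their body parameter, which only moved the callee's private copy while balance_leaf() kept using a stale pointer. Returning the shift and applying it in the caller (body += balance_leaf_left(...)) restores the pre-refactor behaviour. Reduced to its essence (hypothetical names):

static unsigned int consume(const char *body, unsigned int shift)
{
        body += shift;  /* moves only the callee's copy of the pointer */
        return shift;   /* so report the distance to the caller instead */
}

static const char *caller(const char *body)
{
        body += consume(body, 8);  /* caller advances its own pointer */
        return body;               /* now points past the consumed bytes */
}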
@@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
                 }
         }

-        /* wait for all commits to finish */
-        cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
-
         /*
          * We must release the write lock here because
@@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
          */
         reiserfs_write_unlock(sb);

+        /*
+         * Cancel flushing of old commits. Note that neither of these works
+         * will be requeued because superblock is being shutdown and doesn't
+         * have MS_ACTIVE set.
+         */
         cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
-        flush_workqueue(REISERFS_SB(sb)->commit_wq);
+        /* wait for all commits to finish */
+        cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);

         free_journal_ram(sb);
@@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
         if (flush) {
                 flush_commit_list(sb, jl, 1);
                 flush_journal_list(sb, jl, 1);
-        } else if (!(jl->j_state & LIST_COMMIT_PENDING))
-                queue_delayed_work(REISERFS_SB(sb)->commit_wq,
-                                   &journal->j_work, HZ / 10);
+        } else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
+                /*
+                 * Avoid queueing work when sb is being shut down. Transaction
+                 * will be flushed on journal shutdown.
+                 */
+                if (sb->s_flags & MS_ACTIVE)
+                        queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+                                           &journal->j_work, HZ / 10);
+        }

         /*
          * if the next transaction has any chance of wrapping, flush
......
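
The journal fix is an instance of the standard workqueue-teardown recipe: first make sure the work cannot be requeued (here, the MS_ACTIVE checks), then use the _sync cancel variant so a concurrently executing work item is waited for rather than merely dequeued, and only then free the memory the work touches. A generic sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_dev {
        bool shutting_down;        /* checked before any queue_delayed_work() */
        struct delayed_work work;
        void *state;               /* memory the work item dereferences */
};

static void demo_shutdown(struct demo_dev *dev)
{
        dev->shutting_down = true;             /* 1. stop new queueing */
        cancel_delayed_work_sync(&dev->work);  /* 2. wait out a running item */
        kfree(dev->state);                     /* 3. now safe to free */
}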
@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,

 /* insert item into the leaf node in position before */
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-                          struct item_head *inserted_item_ih,
-                          const char *inserted_item_body, int zeros_number)
+                          struct item_head * const inserted_item_ih,
+                          const char * const inserted_item_body,
+                          int zeros_number)
 {
         struct buffer_head *bh = bi->bi_bh;
         int nr, free_space;
......
@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
                        int del_num, int del_bytes);
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-                          struct item_head *inserted_item_ih,
-                          const char *inserted_item_body, int zeros_number);
-void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
-                          int pos_in_item, int paste_size, const char *body,
+                          struct item_head * const inserted_item_ih,
+                          const char * const inserted_item_body,
                           int zeros_number);
+void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
+                          int pos_in_item, int paste_size,
+                          const char * const body, int zeros_number);
 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
                           int pos_in_item, int cut_size);
 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
......
@@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s)
         struct reiserfs_sb_info *sbi = REISERFS_SB(s);
         unsigned long delay;

-        if (s->s_flags & MS_RDONLY)
+        /*
+         * Avoid scheduling flush when sb is being shut down. It can race
+         * with journal shutdown and free still queued delayed work.
+         */
+        if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE))
                 return;

         spin_lock(&sbi->old_work_lock);
......
@@ -217,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
         lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
         mutex_init(&s->s_dquot.dqio_mutex);
         mutex_init(&s->s_dquot.dqonoff_mutex);
-        init_rwsem(&s->s_dquot.dqptr_sem);
         s->s_maxbytes = MAX_NON_LFS;
         s->s_op = &default_op;
         s->s_time_gran = 1000000000;
......
@@ -27,7 +27,7 @@
 #include "udfdecl.h"
 #include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/string.h> /* memset */
 #include <linux/capability.h>
@@ -100,24 +100,6 @@ static int udf_adinicb_write_begin(struct file *file,
         return 0;
 }

-static int udf_adinicb_write_end(struct file *file,
-                        struct address_space *mapping,
-                        loff_t pos, unsigned len, unsigned copied,
-                        struct page *page, void *fsdata)
-{
-        struct inode *inode = mapping->host;
-        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-        char *kaddr;
-        struct udf_inode_info *iinfo = UDF_I(inode);
-
-        kaddr = kmap_atomic(page);
-        memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
-                kaddr + offset, copied);
-        kunmap_atomic(kaddr);
-
-        return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
-}
-
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
                                      struct iov_iter *iter,
                                      loff_t offset)
@@ -130,7 +112,7 @@ const struct address_space_operations udf_adinicb_aops = {
         .readpage       = udf_adinicb_readpage,
         .writepage      = udf_adinicb_writepage,
         .write_begin    = udf_adinicb_write_begin,
-        .write_end      = udf_adinicb_write_end,
+        .write_end      = simple_write_end,
         .direct_IO      = udf_adinicb_direct_IO,
 };
......
@@ -21,7 +21,7 @@

 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>

 #include "udf_sb.h"
......
@@ -63,7 +63,7 @@
 #include "udf_i.h"

 #include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>

 #define VDS_POS_PRIMARY_VOL_DESC        0
 #define VDS_POS_UNALLOC_SPACE_DESC      1
......
@@ -20,7 +20,7 @@
  */

 #include "udfdecl.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/time.h>
......
@@ -412,7 +412,6 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
         int extIndex = 0, newExtIndex = 0, hasExt = 0;
         unsigned short valueCRC;
         uint8_t curr;
-        const uint8_t hexChar[] = "0123456789ABCDEF";

         if (udfName[0] == '.' &&
             (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
@@ -477,10 +476,10 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
                         newIndex = 250;
                 newName[newIndex++] = CRC_MARK;
                 valueCRC = crc_itu_t(0, fidName, fidNameLen);
-                newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
-                newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
-                newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
-                newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
+                newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
+                newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
+                newName[newIndex++] = hex_asc_upper_hi(valueCRC);
+                newName[newIndex++] = hex_asc_upper_lo(valueCRC);

                 if (hasExt) {
                         newName[newIndex++] = EXT_MARK;
......
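
hex_asc_upper_hi() and hex_asc_upper_lo() come from <linux/kernel.h> and index the shared uppercase hex table, so the local hexChar[] copy above becomes dead weight. How the four emitted characters map onto the 16-bit CRC (sketch; crc_to_hex is hypothetical):

#include <linux/kernel.h>

static void crc_to_hex(unsigned short crc, char out[4])
{
        out[0] = hex_asc_upper_hi(crc >> 8);  /* bits 15..12 */
        out[1] = hex_asc_upper_lo(crc >> 8);  /* bits 11..8  */
        out[2] = hex_asc_upper_hi(crc);       /* bits 7..4   */
        out[3] = hex_asc_upper_lo(crc);       /* bits 3..0   */
}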
@@ -390,7 +390,6 @@ struct quota_info {
         unsigned int flags;                     /* Flags for diskquotas on this device */
         struct mutex dqio_mutex;                /* lock device while I/O in progress */
         struct mutex dqonoff_mutex;             /* Serialize quotaon & quotaoff */
-        struct rw_semaphore dqptr_sem;          /* serialize ops using quota_info struct, pointers from inode to dquots */
         struct inode *files[MAXQUOTAS];         /* inodes of quotafiles */
         struct mem_dqinfo info[MAXQUOTAS];      /* Information for each quota type */
         const struct quota_format_ops *ops[MAXQUOTAS];  /* Operations for each type */
......