Commit d515afe8 authored by Hugh Dickins, committed by Linus Torvalds

tmpfs: no need to use i_lock

2.6.36's 7e496299 ("tmpfs: make tmpfs scalable with percpu_counter for
used blocks") to make tmpfs scalable with percpu_counter used
inode->i_lock in place of sbinfo->stat_lock around i_blocks updates; but
that was adverse to scalability, and unnecessary, since info->lock is
already held there in the fast paths.

Remove those uses of i_lock, and add info->lock in the three error paths
where it's then needed across shmem_free_blocks().  It's not actually
needed across shmem_unacct_blocks(), but they're so often paired that it
looks wrong to split them apart.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d0823576
@@ -241,9 +241,7 @@ static void shmem_free_blocks(struct inode *inode, long pages)
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	if (sbinfo->max_blocks) {
 		percpu_counter_add(&sbinfo->used_blocks, -pages);
-		spin_lock(&inode->i_lock);
 		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
-		spin_unlock(&inode->i_lock);
 	}
 }
@@ -432,9 +430,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 		   sbinfo->max_blocks - 1) >= 0)
 			return ERR_PTR(-ENOSPC);
 		percpu_counter_inc(&sbinfo->used_blocks);
-		spin_lock(&inode->i_lock);
 		inode->i_blocks += BLOCKS_PER_PAGE;
-		spin_unlock(&inode->i_lock);
 	}
 	spin_unlock(&info->lock);
@@ -1421,9 +1417,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 		    shmem_acct_block(info->flags))
 			goto nospace;
 		percpu_counter_inc(&sbinfo->used_blocks);
-		spin_lock(&inode->i_lock);
 		inode->i_blocks += BLOCKS_PER_PAGE;
-		spin_unlock(&inode->i_lock);
 	} else if (shmem_acct_block(info->flags))
 		goto nospace;
@@ -1434,8 +1428,10 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 			spin_unlock(&info->lock);
 			filepage = shmem_alloc_page(gfp, info, idx);
 			if (!filepage) {
+				spin_lock(&info->lock);
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
+				spin_unlock(&info->lock);
 				error = -ENOMEM;
 				goto failed;
 			}
@@ -1449,8 +1445,10 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 						current->mm, GFP_KERNEL);
 			if (error) {
 				page_cache_release(filepage);
+				spin_lock(&info->lock);
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
+				spin_unlock(&info->lock);
 				filepage = NULL;
 				goto failed;
 			}
@@ -1480,10 +1478,10 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 			 * be done automatically.
 			 */
 			if (ret) {
-				spin_unlock(&info->lock);
-				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
+				spin_unlock(&info->lock);
+				page_cache_release(filepage);
 				filepage = NULL;
 				if (error)
 					goto failed;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment