Commit 0f079694 authored by Mike Rapoport, committed by Linus Torvalds

shmem: introduce shmem_inode_acct_block

The call to shmem_acct_block() and the update of used_blocks follow one
another in every place they are used.  Combine the two into a helper
function.
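
Each site repeats the same two-step sequence: charge the object via
shmem_acct_block(), then, on size-limited mounts, check used_blocks
against max_blocks and bump it, unwinding the first charge if the limit
check fails.  The combined helper reads as follows (the explanatory
comments are added here for illustration and are not part of the patch):

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * Step 1: charge the memory-accounting quota (a no-op unless
	 * the object was created with VM_NORESERVE).
	 */
	if (shmem_acct_block(info->flags, pages))
		return false;

	/*
	 * Step 2: on mounts with a size limit, reserve the blocks
	 * against used_blocks, failing if the limit would be exceeded.
	 */
	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	/* Step 2 failed: roll back the charge taken in step 1. */
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

Call sites then collapse to a single boolean test, e.g.
"if (!shmem_inode_acct_block(inode, nr)) goto failed;", with
shmem_inode_unacct_blocks() as the matching release on every error path.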

Link: http://lkml.kernel.org/r/1497939652-16528-3-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b1cc94ab
mm/shmem.c
@@ -188,6 +188,38 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 	vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
+static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
+{
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+	if (shmem_acct_block(info->flags, pages))
+		return false;
+
+	if (sbinfo->max_blocks) {
+		if (percpu_counter_compare(&sbinfo->used_blocks,
+					   sbinfo->max_blocks - pages) > 0)
+			goto unacct;
+		percpu_counter_add(&sbinfo->used_blocks, pages);
+	}
+
+	return true;
+
+unacct:
+	shmem_unacct_blocks(info->flags, pages);
+	return false;
+}
+
+static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
+{
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+	if (sbinfo->max_blocks)
+		percpu_counter_sub(&sbinfo->used_blocks, pages);
+	shmem_unacct_blocks(info->flags, pages);
+}
+
 static const struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
 static const struct file_operations shmem_file_operations;
@@ -249,31 +281,20 @@ static void shmem_recalc_inode(struct inode *inode)
 
 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
 	if (freed > 0) {
-		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-		if (sbinfo->max_blocks)
-			percpu_counter_add(&sbinfo->used_blocks, -freed);
 		info->alloced -= freed;
 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
-		shmem_unacct_blocks(info->flags, freed);
+		shmem_inode_unacct_blocks(inode, freed);
 	}
 }
 
 bool shmem_charge(struct inode *inode, long pages)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	unsigned long flags;
 
-	if (shmem_acct_block(info->flags, pages))
+	if (!shmem_inode_acct_block(inode, pages))
 		return false;
 
-	if (sbinfo->max_blocks) {
-		if (percpu_counter_compare(&sbinfo->used_blocks,
-					   sbinfo->max_blocks - pages) > 0)
-			goto unacct;
-		percpu_counter_add(&sbinfo->used_blocks, pages);
-	}
-
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
@@ -282,16 +303,11 @@ bool shmem_charge(struct inode *inode, long pages)
 	inode->i_mapping->nrpages += pages;
 
 	return true;
-
-unacct:
-	shmem_unacct_blocks(info->flags, pages);
-	return false;
 }
 
 void shmem_uncharge(struct inode *inode, long pages)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	unsigned long flags;
 
 	spin_lock_irqsave(&info->lock, flags);
@@ -300,9 +316,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 	shmem_recalc_inode(inode);
 	spin_unlock_irqrestore(&info->lock, flags);
 
-	if (sbinfo->max_blocks)
-		percpu_counter_sub(&sbinfo->used_blocks, pages);
-	shmem_unacct_blocks(info->flags, pages);
+	shmem_inode_unacct_blocks(inode, pages);
 }
 
 /*
@@ -1451,9 +1465,10 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 }
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
-		struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
+		struct inode *inode,
 		pgoff_t index, bool huge)
 {
+	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct page *page;
 	int nr;
 	int err = -ENOSPC;
@@ -1462,14 +1477,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		huge = false;
 	nr = huge ? HPAGE_PMD_NR : 1;
 
-	if (shmem_acct_block(info->flags, nr))
+	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
-	if (sbinfo->max_blocks) {
-		if (percpu_counter_compare(&sbinfo->used_blocks,
-					   sbinfo->max_blocks - nr) > 0)
-			goto unacct;
-		percpu_counter_add(&sbinfo->used_blocks, nr);
-	}
 
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index);
@@ -1482,10 +1491,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	}
 
 	err = -ENOMEM;
-	if (sbinfo->max_blocks)
-		percpu_counter_add(&sbinfo->used_blocks, -nr);
-unacct:
-	shmem_unacct_blocks(info->flags, nr);
+	shmem_inode_unacct_blocks(inode, nr);
 failed:
 	return ERR_PTR(err);
 }
@@ -1750,10 +1756,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	}
 
 alloc_huge:
-	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
-			index, true);
+	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
 	if (IS_ERR(page)) {
-alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
+alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
 				index, false);
 	}
 	if (IS_ERR(page)) {
@@ -1875,10 +1880,7 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 	 * Error recovery.
 	 */
 unacct:
-	if (sbinfo->max_blocks)
-		percpu_counter_sub(&sbinfo->used_blocks,
-				1 << compound_order(page));
-	shmem_unacct_blocks(info->flags, 1 << compound_order(page));
+	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
 
 	if (PageTransHuge(page)) {
 		unlock_page(page);
@@ -2214,7 +2216,6 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	struct address_space *mapping = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
@@ -2226,19 +2227,13 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	int ret;
 
 	ret = -ENOMEM;
-	if (shmem_acct_block(info->flags, 1))
+	if (!shmem_inode_acct_block(inode, 1))
 		goto out;
-	if (sbinfo->max_blocks) {
-		if (percpu_counter_compare(&sbinfo->used_blocks,
-					   sbinfo->max_blocks) >= 0)
-			goto out_unacct_blocks;
-		percpu_counter_inc(&sbinfo->used_blocks);
-	}
 
 	if (!*pagep) {
 		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
-			goto out_dec_used_blocks;
+			goto out_unacct_blocks;
 
 		page_kaddr = kmap_atomic(page);
 		ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
@@ -2248,9 +2243,7 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		/* fallback to copy_from_user outside mmap_sem */
 		if (unlikely(ret)) {
 			*pagep = page;
-			if (sbinfo->max_blocks)
-				percpu_counter_add(&sbinfo->used_blocks, -1);
-			shmem_unacct_blocks(info->flags, 1);
+			shmem_inode_unacct_blocks(inode, 1);
 			/* don't free the page */
 			return -EFAULT;
 		}
@@ -2313,11 +2306,8 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 out_release:
 	unlock_page(page);
 	put_page(page);
-out_dec_used_blocks:
-	if (sbinfo->max_blocks)
-		percpu_counter_add(&sbinfo->used_blocks, -1);
 out_unacct_blocks:
-	shmem_unacct_blocks(info->flags, 1);
+	shmem_inode_unacct_blocks(inode, 1);
 	goto out;
 }