Commit bced0520 authored by KAMEZAWA Hiroyuki's avatar KAMEZAWA Hiroyuki Committed by Linus Torvalds

memcg: fix gfp_mask of callers of charge

Fix misuse of GFP_KERNEL.

Now, most callers of the mem_cgroup_charge_xxx functions use GFP_KERNEL.

I think that this is from the fact that page_cgroup *was* dynamically
allocated.

But now, we allocate all page_cgroup structures at boot.  And
try_to_free_mem_cgroup_pages() reclaims memory using GFP_HIGHUSER_MOVABLE +
the specified GFP_RECLAIM_MASK.

  * This is because we just want to reduce memory usage.
    "Where should we reclaim from?" is not a problem in memcg.

This patch modifies gfp masks to be GFP_HIGHUSER_MOVABLE if possible.

Note: This patch is not for fixing behavior but for showing sane information
      in source code.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7a81b88c
...@@ -808,8 +808,9 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage) ...@@ -808,8 +808,9 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
} }
unlock_page_cgroup(pc); unlock_page_cgroup(pc);
if (mem) { if (mem) {
ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL, ret = mem_cgroup_charge_common(newpage, NULL,
ctype, mem); GFP_HIGHUSER_MOVABLE,
ctype, mem);
css_put(&mem->css); css_put(&mem->css);
} }
return ret; return ret;
...@@ -889,7 +890,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, ...@@ -889,7 +890,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
ret = -EBUSY; ret = -EBUSY;
break; break;
} }
progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL); progress = try_to_free_mem_cgroup_pages(memcg,
GFP_HIGHUSER_MOVABLE);
if (!progress) if (!progress)
retry_count--; retry_count--;
} }
......
...@@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
cow_user_page(new_page, old_page, address, vma); cow_user_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page); __SetPageUptodate(new_page);
if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
goto oom_free_new; goto oom_free_new;
/* /*
...@@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
lock_page(page); lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN); delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) { if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
ret = VM_FAULT_OOM; ret = VM_FAULT_OOM;
unlock_page(page); unlock_page(page);
goto out; goto out;
...@@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto oom; goto oom;
__SetPageUptodate(page); __SetPageUptodate(page);
if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
goto oom_free_page; goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot); entry = mk_pte(page, vma->vm_page_prot);
...@@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM; ret = VM_FAULT_OOM;
goto out; goto out;
} }
if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { if (mem_cgroup_newpage_charge(page,
mm, GFP_HIGHUSER_MOVABLE)) {
ret = VM_FAULT_OOM; ret = VM_FAULT_OOM;
page_cache_release(page); page_cache_release(page);
goto out; goto out;
......
...@@ -928,8 +928,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s ...@@ -928,8 +928,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
error = 1; error = 1;
if (!inode) if (!inode)
goto out; goto out;
/* Precharge page using GFP_KERNEL while we can wait */ /* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE);
if (error) if (error)
goto out; goto out;
error = radix_tree_preload(GFP_KERNEL); error = radix_tree_preload(GFP_KERNEL);
...@@ -1379,7 +1379,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, ...@@ -1379,7 +1379,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
/* Precharge page while we can wait, compensate after */ /* Precharge page while we can wait, compensate after */
error = mem_cgroup_cache_charge(filepage, current->mm, error = mem_cgroup_cache_charge(filepage, current->mm,
gfp & ~__GFP_HIGHMEM); GFP_HIGHUSER_MOVABLE);
if (error) { if (error) {
page_cache_release(filepage); page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1); shmem_unacct_blocks(info->flags, 1);
......
...@@ -695,7 +695,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -695,7 +695,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
pte_t *pte; pte_t *pte;
int ret = 1; int ret = 1;
if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr)) if (mem_cgroup_try_charge(vma->vm_mm, GFP_HIGHUSER_MOVABLE, &ptr))
ret = -ENOMEM; ret = -ENOMEM;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment